import math
import torch
import torch.nn as nn
from torch.nn import functional as F


class CosmicConfig:
    """Configuration class for CosmicFish."""

    def __init__(self,
                 vocab_size=50257,
                 block_size=512,
                 n_layer=10,
                 n_head=16,
                 n_embd=640,
                 bias=True,
                 dropout=0.0,  # Always 0 for inference
                 n_query_groups=4,
                 eps=1e-6,
                 use_rotary=True,
                 use_swiglu=True,
                 use_qk_norm=False,
                 use_gqa=True):
        self.vocab_size = vocab_size
        self.block_size = block_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.bias = bias
        self.dropout = dropout
        self.eps = eps
        self.use_rotary = use_rotary
        self.use_swiglu = use_swiglu
        self.use_qk_norm = use_qk_norm
        self.use_gqa = use_gqa
        self.n_query_groups = n_query_groups if use_gqa else n_head
        # Ensure n_head is divisible by n_query_groups
        assert n_head % self.n_query_groups == 0, "n_head must be divisible by n_query_groups"


class RMSNorm(nn.Module):
    """Root Mean Square Normalization"""

    def __init__(self, dim, eps=1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
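        # Computes weight * x / sqrt(mean(x^2, dim=-1) + eps); the per-channel
        # scale is the only learned parameter, with no bias or mean subtraction.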
        rms = torch.sqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
        return self.weight * (x / rms)


def precompute_freqs_cis(dim, end, theta=10000.0):
    """Precompute the frequency tensor for complex exponentials (cis)"""
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    t = torch.arange(end, device=freqs.device)
    freqs = torch.outer(t, freqs)
    freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
    return freqs_cis


def apply_rotary_emb(xq, xk, freqs_cis):
    """Apply rotary embeddings to input tensors"""
    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))

    seq_len = xq_.size(2)
    if freqs_cis.size(0) < seq_len:
        raise ValueError(f"freqs_cis has only {freqs_cis.size(0)} values but sequence length is {seq_len}")

    freqs_cis_seq = freqs_cis[:seq_len]
    xq_out = torch.view_as_real(xq_ * freqs_cis_seq.unsqueeze(0)).flatten(3)
    xk_out = torch.view_as_real(xk_ * freqs_cis_seq.unsqueeze(0)).flatten(3)

    return xq_out.type_as(xq), xk_out.type_as(xk)


class GroupedQueryAttention(nn.Module):
    """Grouped Query Attention (GQA) implementation"""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0

        head_dim = config.n_embd // config.n_head
        self.head_dim = head_dim
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.n_query_groups = config.n_query_groups

        self.kv_heads = config.n_head // config.n_query_groups if config.use_gqa else config.n_head
        qkv_proj_size = (config.n_head + 2 * self.kv_heads) * head_dim
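        # With the default config (n_embd=640, n_head=16, n_query_groups=4) this
        # is (16 + 2 * 4) * 40 = 960 output features, versus 3 * 640 = 1920 for a
        # full multi-head QKV projection.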

        self.c_attn = nn.Linear(config.n_embd, qkv_proj_size, bias=config.bias)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)

        # Flash attention support
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                 .view(1, 1, config.block_size, config.block_size))

        # Query-key normalization
        self.qk_norm = getattr(config, 'use_qk_norm', False)
        if self.qk_norm:
            self.q_norm = RMSNorm(head_dim, eps=getattr(config, 'eps', 1e-6))
            self.k_norm = RMSNorm(head_dim, eps=getattr(config, 'eps', 1e-6))

    def forward(self, x, freqs_cis=None):
        B, T, C = x.size()
        qkv = self.c_attn(x)
        head_dim = C // self.n_head

        q_size = self.n_head * head_dim
        k_size = self.kv_heads * head_dim
        v_size = self.kv_heads * head_dim

        q, k, v = qkv.split([q_size, k_size, v_size], dim=2)

        q = q.view(B, T, self.n_head, head_dim).transpose(1, 2)
        k = k.view(B, T, self.kv_heads, head_dim).transpose(1, 2)
        v = v.view(B, T, self.kv_heads, head_dim).transpose(1, 2)

        # Repeat k and v if needed for GQA
        if self.kv_heads < self.n_head:
            repeats = self.n_head // self.kv_heads
            k = k.repeat_interleave(repeats, dim=1)
            v = v.repeat_interleave(repeats, dim=1)

        # Apply rotary embeddings
        if freqs_cis is not None:
            q, k = apply_rotary_emb(q, k, freqs_cis)

        # Apply query-key normalization
        if self.qk_norm:
            q = self.q_norm(q)
            k = self.k_norm(k)

        # Compute attention
        if self.flash:
            y = torch.nn.functional.scaled_dot_product_attention(
                q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True
            )
        else:
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
            att = F.softmax(att, dim=-1)
            y = att @ v

        y = y.transpose(1, 2).contiguous().view(B, T, C)
        y = self.c_proj(y)
        return y


class Block(nn.Module):
    """Transformer block"""

    def __init__(self, config):
        super().__init__()
        self.ln_1 = RMSNorm(config.n_embd, eps=config.eps)
        self.ln_2 = RMSNorm(config.n_embd, eps=config.eps)
        self.attn = GroupedQueryAttention(config)

        # MLP implementation based on configuration
        if config.use_swiglu:
            # SwiGLU MLP
            self.mlp = nn.ModuleDict(dict(
                gate=nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias),
                up=nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias),
                down=nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias),
                act=nn.SiLU(),
            ))
            m = self.mlp
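            # Gated MLP: SiLU is applied to the "up" branch and the result is
            # scaled elementwise by the "gate" branch before the down projection.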
            self.mlpf = lambda x: m.down(m.act(m.up(x)) * m.gate(x))
        else:
            # Traditional MLP
            self.mlp = nn.ModuleDict(dict(
                c_fc=nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias),
                c_proj=nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias),
                act=nn.GELU(),
            ))
            m = self.mlp
            self.mlpf = lambda x: m.c_proj(m.act(m.c_fc(x)))

    def forward(self, x, freqs_cis=None):
        x = x + self.attn(self.ln_1(x), freqs_cis)
        x = x + self.mlpf(self.ln_2(x))
        return x


class CosmicFish(nn.Module):
    """
    CosmicFish model for inference only.
    Features: Rotary Positional Embeddings, Grouped-Query Attention, SwiGLU, RMSNorm
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte=nn.Embedding(config.vocab_size, config.n_embd),
            h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f=RMSNorm(config.n_embd, eps=config.eps),
        ))

        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Share weights between embedding and output
        self.transformer.wte.weight = self.lm_head.weight
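        # A single vocab_size x n_embd matrix (about 32M values at the defaults)
        # serves as both the input embedding and the output projection.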

        # Precompute rotary embedding frequencies
        if config.use_rotary:
            head_dim = config.n_embd // config.n_head
            self.freqs_cis = precompute_freqs_cis(head_dim, config.block_size)
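            # Stored as a plain attribute rather than a registered buffer;
            # forward() moves it onto the input device on each call.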
        else:
            self.freqs_cis = None
            self.transformer.wpe = nn.Embedding(config.block_size, config.n_embd)

    def get_num_params(self, non_embedding=True):
        """Return the number of parameters in the model."""
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding and hasattr(self.transformer, 'wpe'):
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def forward(self, idx, targets=None):
        """Forward pass through the model."""
        device = idx.device
        b, t = idx.size()
        assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"

        # Get token embeddings
        tok_emb = self.transformer.wte(idx)

        # Handle positional embeddings
        if self.config.use_rotary:
            x = tok_emb
            freqs_cis = self.freqs_cis.to(device) if self.freqs_cis is not None else None
        else:
            pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0)
            pos_emb = self.transformer.wpe(pos)
            x = tok_emb + pos_emb
            freqs_cis = None

        # Apply transformer blocks
        for block in self.transformer.h:
            x = block(x, freqs_cis)

        # Apply final normalization
        x = self.transformer.ln_f(x)

        # Calculate outputs
        if targets is not None:
            logits = self.lm_head(x)
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
        else:
            # For inference, only compute logits for the last token
            logits = self.lm_head(x[:, [-1], :])
            loss = None

        return logits, loss

    @torch.no_grad()
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """
        Generate text by sampling from the model, token by token.
        """
        for _ in range(max_new_tokens):
            # Crop sequence to block size if needed
            idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]

            # Forward pass
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / temperature

            # Apply top-k sampling
            if top_k is not None:
                v, _ = torch.topk(logits, top_k)
                logits[logits < v[:, [-1]]] = -float('Inf')

            # Sample next token
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)

            # Append to sequence
            idx = torch.cat((idx, idx_next), dim=1)

        return idx
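

if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only): runs the randomly initialized
    # model on a dummy integer prompt. Checkpoint loading and tokenization are
    # assumed to live elsewhere; the small config below is hypothetical.
    config = CosmicConfig(block_size=128, n_layer=2, n_head=8, n_embd=128,
                          n_query_groups=4)
    model = CosmicFish(config)
    model.eval()

    prompt = torch.randint(0, config.vocab_size, (1, 8))  # (batch=1, seq_len=8)
    out = model.generate(prompt, max_new_tokens=16, temperature=1.0, top_k=50)
    print("generated shape:", out.shape)  # expected: torch.Size([1, 24])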