import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig

class ChessConfig(PretrainedConfig):
    """Configuration for the small decoder-only chess language model below."""

    model_type = "chess_lm"

    def __init__(
        self,
        vocab_size=1200,
        n_positions=256,
        n_embd=128,
        n_layer=4,
        n_head=4,
        n_ctx=256,
        tie_word_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        # PretrainedConfig sets tie_word_embeddings itself, so pass it through to
        # super().__init__ rather than assigning it beforehand (a plain attribute
        # assignment would be silently overwritten by the base class).
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
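
# Rough parameter budget implied by the defaults above (a back-of-envelope
# estimate, not an exact count): tied token embedding 1200 * 128 ~ 154K,
# position embedding 256 * 128 ~ 33K, and each encoder layer about
# 12 * 128^2 ~ 197K (attention projections plus the 4x feed-forward),
# so four layers put the whole model at roughly 1M parameters.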

class ChessForCausalLM(PreTrainedModel):
    config_class = ChessConfig

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Learned token and absolute position embeddings.
        self.token_embedding = nn.Embedding(config.vocab_size, config.n_embd)
        self.position_embedding = nn.Embedding(config.n_positions, config.n_embd)
        # A pre-norm TransformerEncoder stack used as a decoder-only model;
        # causality is enforced by the attention mask built in forward().
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=config.n_embd, nhead=config.n_head, dim_feedforward=config.n_embd * 4,
            batch_first=True, norm_first=True,
        )
        self.blocks = nn.TransformerEncoder(encoder_layer, num_layers=config.n_layer)
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        if config.tie_word_embeddings:
            # Tie the output projection to the token embedding matrix.
            self.head.weight = self.token_embedding.weight
        self.post_init()

    def get_input_embeddings(self):
        return self.token_embedding

    def set_input_embeddings(self, value):
        self.token_embedding = value

    def get_output_embeddings(self):
        # Exposing the LM head lets PreTrainedModel manage weight tying and resizing.
        return self.head

    def forward(self, input_ids, labels=None, **kwargs):
        B, T = input_ids.shape
        tok_emb = self.token_embedding(input_ids)                     # (B, T, n_embd)
        pos_emb = self.position_embedding(torch.arange(T, device=input_ids.device))
        x = tok_emb + pos_emb
        # Additive causal mask: zeros on and below the diagonal, -inf above it,
        # so every position attends only to itself and earlier tokens.
        mask = torch.triu(
            torch.full((T, T), float("-inf"), device=input_ids.device), diagonal=1
        )
        x = self.blocks(x, mask=mask, is_causal=True)
        x = self.ln_f(x)
        logits = self.head(x)                                         # (B, T, vocab_size)
        loss = None
        if labels is not None:
            # Next-token prediction: position t predicts token t + 1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1)
            )
        # With labels, return (loss, logits) so the HF Trainer can read the loss
        # from the first tuple element; otherwise return the raw logits.
        return (loss, logits) if loss is not None else logits
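
# Optional, and only a sketch: registering the custom classes with the Auto*
# factories lets AutoConfig / AutoModelForCausalLM resolve the "chess_lm"
# model_type from a saved checkpoint directory.
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_lm", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)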

def print_parameter_budget(config):
    # Build the model and report its parameter count (tied weights counted once).
    model = ChessForCausalLM(config)
    n_params = sum(p.numel() for p in model.parameters())
    print(f"Model params: {n_params:,}")
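
# Quick smoke test, included as a sketch (names and shapes here are illustrative):
# running the file directly builds the default configuration, prints the parameter
# budget, and checks that a dummy batch yields (B, T, vocab_size) logits and a
# finite loss.
if __name__ == "__main__":
    config = ChessConfig()
    print_parameter_budget(config)

    model = ChessForCausalLM(config)
    dummy = torch.randint(0, config.vocab_size, (2, 32))  # batch of 2 sequences, 32 tokens
    loss, logits = model(dummy, labels=dummy)
    print(f"logits: {tuple(logits.shape)}, loss: {loss.item():.3f}")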