import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig


class PyPilotConfig(PretrainedConfig):
    model_type = "pypilot"

    def __init__(self, vocab_size=50000, hidden_size=768, num_layers=12, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        super().__init__(**kwargs)


class PyPilotModel(PreTrainedModel):
    config_class = PyPilotConfig

    def __init__(self, config):
        super().__init__(config)
        # Token embedding table: maps token ids to hidden_size-dimensional vectors.
        self.embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        # Stack of standard Transformer encoder layers with 8 attention heads each.
        # batch_first=True so inputs are (batch_size, sequence_length, hidden_size),
        # matching the (batch_size, sequence_length) layout of input_ids.
        self.transformer_blocks = nn.ModuleList([
            nn.TransformerEncoderLayer(d_model=config.hidden_size, nhead=8, batch_first=True)
            for _ in range(config.num_layers)
        ])
        # Projects hidden states back to vocabulary logits.
        self.output_layer = nn.Linear(config.hidden_size, config.vocab_size)

    def forward(self, input_ids):
        x = self.embedding(input_ids)
        for block in self.transformer_blocks:
            x = block(x)
        return self.output_layer(x)
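
To make the shapes concrete, here is a minimal usage sketch of the classes above. The batch size, sequence length, and random token ids are illustrative assumptions, not values from the original; only the config defaults come from the code itself.

# Minimal usage sketch: build the config and model, then run one forward pass
# on random token ids (shapes are illustrative assumptions).
config = PyPilotConfig(vocab_size=50000, hidden_size=768, num_layers=12)
model = PyPilotModel(config)

input_ids = torch.randint(0, config.vocab_size, (2, 16))  # (batch_size, sequence_length)
logits = model(input_ids)
print(logits.shape)  # torch.Size([2, 16, 50000]): vocabulary logits at each position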