Sadjad Alikhani committed
Commit 23274c4 · verified · 1 Parent(s): af1944d

Update lwm_model.py

Files changed (1)
  1. lwm_model.py +152 -153
lwm_model.py CHANGED
@@ -1,153 +1,152 @@
- # -*- coding: utf-8 -*-
- """
- Created on Sun Sep 15 19:55:23 2024
-
- @author: salikha4
- """
-
- # import os
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import numpy as np
- # from inference import *
- # from input_preprocess import *
-
-
- ELEMENT_LENGTH = 16
- D_MODEL = 64
- MAX_LEN = 129
- N_LAYERS = 12
- N_HEADS = 12
- D_FF = D_MODEL * 4
- D_K = D_MODEL // N_HEADS
- D_V = D_MODEL // N_HEADS
- DROPOUT = 0.1
-
- class LayerNormalization(nn.Module):
-     def __init__(self, d_model: int, eps: float = 1e-6) -> None:
-         super().__init__()
-         self.eps = eps
-         self.alpha = nn.Parameter(torch.ones(d_model))
-         self.bias = nn.Parameter(torch.zeros(d_model))
-
-     def forward(self, x):
-         mean = x.mean(dim=-1, keepdim=True)
-         std = x.std(dim=-1, keepdim=True)
-         return self.alpha * (x - mean) / (std + self.eps) + self.bias
-
- class Embedding(nn.Module):
-     def __init__(self, element_length, d_model, max_len):
-         super().__init__()
-         self.element_length = element_length
-         self.d_model = d_model
-         self.proj = nn.Linear(element_length, d_model)
-         self.pos_embed = nn.Embedding(max_len, d_model)
-         self.norm = LayerNormalization(d_model)
-
-     def forward(self, x):
-         seq_len = x.size(1)
-         pos = torch.arange(seq_len, dtype=torch.long, device=x.device)
-         pos = pos.unsqueeze(0).expand_as(x[:, :, 0])
-         tok_emb = self.proj(x.float())
-         embedding = tok_emb + self.pos_embed(pos)
-         return self.norm(embedding)
-
- class ScaledDotProductAttention(nn.Module):
-     def __init__(self):
-         super().__init__()
-
-     def forward(self, Q, K, V):
-         scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(D_K)
-         attn = F.softmax(scores, dim=-1)
-         context = torch.matmul(attn, V)
-         return context, attn
-
- class MultiHeadAttention(nn.Module):
-     def __init__(self):
-         super().__init__()
-         self.W_Q = nn.Linear(D_MODEL, D_K * N_HEADS)
-         self.W_K = nn.Linear(D_MODEL, D_K * N_HEADS)
-         self.W_V = nn.Linear(D_MODEL, D_V * N_HEADS)
-         self.linear = nn.Linear(N_HEADS * D_V, D_MODEL)
-         self.norm = LayerNormalization(D_MODEL)
-         self.dropout = nn.Dropout(DROPOUT)
-
-     def forward(self, Q, K, V):
-         residual, batch_size = Q, Q.size(0)
-         q_s = self.W_Q(Q).view(batch_size, -1, N_HEADS, D_K).transpose(1, 2)
-         k_s = self.W_K(K).view(batch_size, -1, N_HEADS, D_K).transpose(1, 2)
-         v_s = self.W_V(V).view(batch_size, -1, N_HEADS, D_V).transpose(1, 2)
-
-         context, attn = ScaledDotProductAttention()(q_s, k_s, v_s)
-         output = context.transpose(1, 2).contiguous().view(batch_size, -1, N_HEADS * D_V)
-         output = self.linear(output)
-         return residual + self.dropout(output), attn #residual + self.dropout(output), attn
-
- class PoswiseFeedForwardNet(nn.Module):
-     def __init__(self):
-         super().__init__()
-         self.fc1 = nn.Linear(D_MODEL, D_FF)
-         self.fc2 = nn.Linear(D_FF, D_MODEL)
-         self.dropout = nn.Dropout(DROPOUT)
-         self.norm = LayerNormalization(D_MODEL)
-
-     def forward(self, x):
-         output = self.fc2(self.dropout(F.relu(self.fc1(x))))
-         return x + self.dropout(output) #x + self.dropout(output)
-
- class EncoderLayer(nn.Module):
-     def __init__(self):
-         super().__init__()
-         self.enc_self_attn = MultiHeadAttention()
-         self.pos_ffn = PoswiseFeedForwardNet()
-         self.norm = LayerNormalization(D_MODEL)
-
-     def forward(self, enc_inputs):
-         attn_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs)
-         attn_outputs = self.norm(attn_outputs)
-         enc_outputs = self.pos_ffn(attn_outputs)
-         return enc_outputs, attn
-
- class LWM(torch.nn.Module):
-     def __init__(self, element_length=16, d_model=64, max_len=129, n_layers=12):
-         super().__init__()
-         self.embedding = Embedding(element_length, d_model, max_len)
-         self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
-         self.linear = nn.Linear(d_model, d_model)
-         self.norm = LayerNormalization(d_model)
-
-         embed_weight = self.embedding.proj.weight
-         d_model, n_dim = embed_weight.size()
-         self.decoder = nn.Linear(d_model, n_dim, bias=False)
-         self.decoder.weight = nn.Parameter(embed_weight.transpose(0, 1))
-         self.decoder_bias = nn.Parameter(torch.zeros(n_dim))
-
-     @classmethod
-     def from_pretrained(cls, ckpt_name='model_weights.pth', device='cuda', use_auth_token=None):
-         # Define model
-         model = cls().to(device)
-
-         # Download model weights using Hugging Face Hub
-         # ckpt_path = hf_hub_download(repo_id="sadjadalikhani/LWM", filename=ckpt_name, use_auth_token=use_auth_token)
-         ckpt_path = ckpt_name
-
-         # Load the model weights
-         model.load_state_dict(torch.load(ckpt_path, map_location=device))
-         print(f"Model loaded successfully from {ckpt_path} to {device}")
-
-         return model
-
-     def forward(self, input_ids, masked_pos):
-         # Forward pass
-         output = self.embedding(input_ids)
-         for layer in self.layers:
-             output, _ = layer(output)
-
-         masked_pos = masked_pos.long()[:, :, None].expand(-1, -1, output.size(-1))
-         h_masked = torch.gather(output, 1, masked_pos)
-         h_masked = self.norm(F.relu(self.linear(h_masked)))
-         logits_lm = self.decoder(h_masked) + self.decoder_bias
-
-         return logits_lm, output
-
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import numpy as np
+ import random
+
+ # Set manual seed for reproducibility
+ def set_random_seed(seed=42):
+     torch.manual_seed(seed)
+     np.random.seed(seed)
+     random.seed(seed)
+     if torch.cuda.is_available():
+         torch.cuda.manual_seed_all(seed)
+     # Ensures deterministic behavior
+     torch.backends.cudnn.deterministic = True
+     torch.backends.cudnn.benchmark = False
+
+ # Call the seed function
+ set_random_seed()
+
+ ELEMENT_LENGTH = 16
+ D_MODEL = 64
+ MAX_LEN = 129
+ N_LAYERS = 12
+ N_HEADS = 12
+ D_FF = D_MODEL * 4
+ D_K = D_MODEL // N_HEADS
+ D_V = D_MODEL // N_HEADS
+ DROPOUT = 0.1
+
+ class LayerNormalization(nn.Module):
+     def __init__(self, d_model: int, eps: float = 1e-6) -> None:
+         super().__init__()
+         self.eps = eps
+         self.alpha = nn.Parameter(torch.ones(d_model))
+         self.bias = nn.Parameter(torch.zeros(d_model))
+
+     def forward(self, x):
+         mean = x.mean(dim=-1, keepdim=True)
+         std = x.std(dim=-1, keepdim=True)
+         return self.alpha * (x - mean) / (std + self.eps) + self.bias
+
+ class Embedding(nn.Module):
+     def __init__(self, element_length, d_model, max_len):
+         super().__init__()
+         self.element_length = element_length
+         self.d_model = d_model
+         self.proj = nn.Linear(element_length, d_model)
+         self.pos_embed = nn.Embedding(max_len, d_model)
+         self.norm = LayerNormalization(d_model)
+
+     def forward(self, x):
+         seq_len = x.size(1)
+         pos = torch.arange(seq_len, dtype=torch.long, device=x.device)
+         pos = pos.unsqueeze(0).expand_as(x[:, :, 0])
+         tok_emb = self.proj(x.float()) # Ensure consistency in floating-point precision
+         embedding = tok_emb + self.pos_embed(pos)
+         return self.norm(embedding)
+
+ class ScaledDotProductAttention(nn.Module):
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, Q, K, V):
+         scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(D_K)
+         attn = F.softmax(scores, dim=-1)
+         context = torch.matmul(attn, V)
+         return context, attn
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.W_Q = nn.Linear(D_MODEL, D_K * N_HEADS)
+         self.W_K = nn.Linear(D_MODEL, D_K * N_HEADS)
+         self.W_V = nn.Linear(D_MODEL, D_V * N_HEADS)
+         self.linear = nn.Linear(N_HEADS * D_V, D_MODEL)
+         self.norm = LayerNormalization(D_MODEL)
+         self.dropout = nn.Dropout(DROPOUT)
+
+     def forward(self, Q, K, V):
+         residual, batch_size = Q, Q.size(0)
+         q_s = self.W_Q(Q).view(batch_size, -1, N_HEADS, D_K).transpose(1, 2)
+         k_s = self.W_K(K).view(batch_size, -1, N_HEADS, D_K).transpose(1, 2)
+         v_s = self.W_V(V).view(batch_size, -1, N_HEADS, D_V).transpose(1, 2)
+
+         context, attn = ScaledDotProductAttention()(q_s, k_s, v_s)
+         output = context.transpose(1, 2).contiguous().view(batch_size, -1, N_HEADS * D_V)
+         output = self.linear(output)
+         return residual + self.dropout(output), attn
+
+ class PoswiseFeedForwardNet(nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.fc1 = nn.Linear(D_MODEL, D_FF)
+         self.fc2 = nn.Linear(D_FF, D_MODEL)
+         self.dropout = nn.Dropout(DROPOUT)
+         self.norm = LayerNormalization(D_MODEL)
+
+     def forward(self, x):
+         output = self.fc2(self.dropout(F.relu(self.fc1(x))))
+         return x + self.dropout(output)
+
+ class EncoderLayer(nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.enc_self_attn = MultiHeadAttention()
+         self.pos_ffn = PoswiseFeedForwardNet()
+         self.norm = LayerNormalization(D_MODEL)
+
+     def forward(self, enc_inputs):
+         attn_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs)
+         attn_outputs = self.norm(attn_outputs)
+         enc_outputs = self.pos_ffn(attn_outputs)
+         return enc_outputs, attn
+
+ class LWM(torch.nn.Module):
+     def __init__(self, element_length=16, d_model=64, max_len=129, n_layers=12):
+         super().__init__()
+         self.embedding = Embedding(element_length, d_model, max_len)
+         self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
+         self.linear = nn.Linear(d_model, d_model)
+         self.norm = LayerNormalization(d_model)
+
+         embed_weight = self.embedding.proj.weight
+         d_model, n_dim = embed_weight.size()
+         self.decoder = nn.Linear(d_model, n_dim, bias=False)
+         self.decoder.weight = nn.Parameter(embed_weight.transpose(0, 1))
+         self.decoder_bias = nn.Parameter(torch.zeros(n_dim))
+
+     @classmethod
+     def from_pretrained(cls, ckpt_name='model_weights.pth', device='cuda', use_auth_token=None):
+         # Define model
+         model = cls().to(device)
+
+         # Load model weights
+         ckpt_path = ckpt_name
+         model.load_state_dict(torch.load(ckpt_path, map_location=device))
+         print(f"Model loaded successfully from {ckpt_path} to {device}")
+
+         return model
+
+     def forward(self, input_ids, masked_pos):
+         output = self.embedding(input_ids)
+         for layer in self.layers:
+             output, _ = layer(output)
+
+         masked_pos = masked_pos.long()[:, :, None].expand(-1, -1, output.size(-1))
+         h_masked = torch.gather(output, 1, masked_pos)
+         h_masked = self.norm(F.relu(self.linear(h_masked)))
+         logits_lm = self.decoder(h_masked) + self.decoder_bias
+
+         return logits_lm, output
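
For reference, a minimal usage sketch of the updated module. It assumes a local checkpoint file named model_weights.pth (the default in from_pretrained); the batch size, number of masked positions, and the choice of device='cpu' below are illustrative and not part of this commit.

    import torch
    from lwm_model import LWM

    # Load the model; 'cpu' keeps the example hardware-independent
    # (the default device in from_pretrained is 'cuda').
    # Assumes model_weights.pth exists in the working directory.
    model = LWM.from_pretrained(ckpt_name='model_weights.pth', device='cpu')
    model.eval()

    # Illustrative inputs: a batch of 2 sequences of MAX_LEN=129 elements,
    # each element a vector of ELEMENT_LENGTH=16, with 5 masked positions per sequence.
    input_ids = torch.randn(2, 129, 16)
    masked_pos = torch.randint(0, 129, (2, 5))

    with torch.no_grad():
        logits_lm, enc_output = model(input_ids, masked_pos)

    print(logits_lm.shape)   # torch.Size([2, 5, 16]): reconstructions of the masked elements
    print(enc_output.shape)  # torch.Size([2, 129, 64]): per-position encoder outputs

Note that importing lwm_model now calls set_random_seed() at import time, so the random tensors drawn above are reproducible across runs.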