gasmichel committed
Commit 306b828 · verified · 1 Parent(s): 9afee78

Delete folder .ipynb_checkpoints with huggingface_hub

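The commit message points at the huggingface_hub client; below is a minimal sketch of how such a deletion is typically made with it (the repo id is a placeholder, not taken from this page):

# Sketch: remove the .ipynb_checkpoints folder from a Hub repo in one commit.
from huggingface_hub import HfApi

api = HfApi()
api.delete_folder(
    path_in_repo=".ipynb_checkpoints",
    repo_id="<user>/<model>",          # placeholder repo id
    repo_type="model",
    commit_message="Delete folder .ipynb_checkpoints with huggingface_hub",
)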
.ipynb_checkpoints/config-checkpoint.json DELETED
@@ -1,14 +0,0 @@
- {
-   "architectures": [
-     "UARScene"
-   ],
-   "auto_map": {
-     "AutoConfig": "config.LUARConfig",
-     "AutoModel": "model.UARScene"
-   },
-   "embedding_size": 512,
-   "model_type": "LUAR",
-   "torch_dtype": "float32",
-   "transformers_version": "4.33.2",
-   "use_memory_efficient_attention": false
- }
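The auto_map above routes AutoConfig and AutoModel to config.LUARConfig and model.UARScene shipped inside the repository, so loading a checkpoint configured this way needs remote code enabled. A minimal sketch (the repo id is a placeholder):

from transformers import AutoModel

# auto_map dispatches to the repo's own config.py / model.py,
# which is why trust_remote_code=True is required.
model = AutoModel.from_pretrained("<user>/<model>", trust_remote_code=True)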
 
.ipynb_checkpoints/config-checkpoint.py DELETED
@@ -1,11 +0,0 @@
- from transformers import PretrainedConfig
-
- class LUARConfig(PretrainedConfig):
-     model_type = "LUAR"
-
-     def __init__(self,
-                  embedding_size: int = 512,
-                  **kwargs,
-                  ):
-         self.embedding_size = embedding_size
-         super().__init__(**kwargs)
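Outside of the Hub's auto_map mechanism, a config class like this is usually hooked into the Auto classes by explicit registration; a hedged sketch, assuming LUARConfig and UARScene are importable from the config.py and model.py shown in this commit:

from transformers import AutoConfig, AutoModel

from config import LUARConfig  # config.py from this repo
from model import UARScene     # model.py from this repo

# Let AutoConfig/AutoModel resolve model_type "LUAR" to the custom classes.
AutoConfig.register("LUAR", LUARConfig)
AutoModel.register(LUARConfig, UARScene)

config = LUARConfig(embedding_size=512)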
 
.ipynb_checkpoints/model-checkpoint.py DELETED
@@ -1,95 +0,0 @@
- import os
- from functools import partial
-
- import torch
- import torch.nn as nn
- from einops import rearrange, reduce, repeat
- from transformers import AutoModel
- import torch.nn.functional as F
-
- # from models.layers import MemoryEfficientAttention, SelfAttention
- from huggingface_hub import PyTorchModelHubMixin
-
- from transformers import AutoModel, PreTrainedModel
-
-
- class UARScene(PreTrainedModel):
-     """Defines the SBERT model.
-     """
-     def __init__(self, config):
-         super().__init__()
-         self.config = config
-         self.create_transformer()
-
-         self.linear = nn.Linear(self.hidden_size, self.config.embedding_size)
-
-     def attn_fn(self, k, q, v):
-         d_k = q.size(-1)
-         scores = torch.matmul(k, q.transpose(-2, -1)) / math.sqrt(d_k)
-         p_attn = F.softmax(scores, dim=-1)
-
-         return torch.matmul(p_attn, v)
-
-
-     def create_transformer(self):
-         """Creates the Transformer model.
-         """
-
-         self.transformer = AutoModel.from_pretrained("sentence-transformers/all-distilroberta-v1")
-         self.hidden_size = self.transformer.config.hidden_size
-         self.num_attention_heads = self.transformer.config.num_attention_heads
-         self.dim_head = self.hidden_size // self.num_attention_heads
-
-     def mean_pooling(self, token_embeddings, attention_mask):
-         """Mean Pooling as described in the SBERT paper.
-         """
-         input_mask_expanded = repeat(attention_mask, 'b l -> b l d', d=self.hidden_size).float()
-         sum_embeddings = reduce(token_embeddings * input_mask_expanded, 'b l d -> b d', 'sum')
-         sum_mask = torch.clamp(reduce(input_mask_expanded, 'b l d -> b d', 'sum'), min=1e-9)
-         return sum_embeddings / sum_mask
-
-     def get_episode_embeddings(self, data):
-         """Computes the Author Embedding.
-         """
-         # batch_size, num_sample_per_author, episode_length
-         input_ids, attention_mask = data[0].unsqueeze(1), data[1].unsqueeze(1)
-         B, N, E, _ = input_ids.shape
-
-         input_ids = rearrange(input_ids, 'b n e l -> (b n e) l')
-         attention_mask = rearrange(attention_mask, 'b n e l -> (b n e) l')
-
-         outputs = self.transformer(
-             input_ids=input_ids,
-             attention_mask=attention_mask,
-             return_dict=True,
-             output_hidden_states=True
-         )
-
-         # at this point, we're embedding individual "comments"
-         comment_embeddings = self.mean_pooling(outputs['last_hidden_state'], attention_mask)
-         comment_embeddings = rearrange(comment_embeddings, '(b n e) l -> (b n) e l', b=B, n=N, e=E)
-
-         # aggregate individual comments embeddings into episode embeddings
-         episode_embeddings = self.attn_fn(comment_embeddings, comment_embeddings, comment_embeddings)
-         episode_embeddings = reduce(episode_embeddings, 'b e l -> b l', 'max')
-
-         episode_embeddings = self.linear(episode_embeddings)
-         return episode_embeddings, comment_embeddings
-
-     def forward(self, input_ids, attention_mask):
-         """Calculates a fixed-length feature vector for a batch of episode samples.
-         """
-         data = [input_ids, attention_mask]
-         episode_embeddings, _ = self.get_episode_embeddings(data)
-
-         return episode_embeddings
-
-     def _model_forward(self, batch):
-         """Passes a batch of data through the model.
-         This is used in the lightning_trainer.py file.
-         """
-         data, _, _ = batch
-         episode_embeddings, comment_embeddings = self.forward(data)
-         # labels = torch.flatten(labels)
-
-         return episode_embeddings, comment_embeddings
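For context, forward() takes token ids and attention masks of shape (batch, episode_length, seq_len) and returns one fixed-size embedding per episode. A hypothetical usage sketch, assuming the repository's top-level model.py mirrors this checkpoint copy and the backbone tokenizer is sentence-transformers/all-distilroberta-v1 as in create_transformer() (the repo id is a placeholder):

import torch
from transformers import AutoModel, AutoTokenizer

repo_id = "<user>/<model>"  # placeholder repo id
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-distilroberta-v1")
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

# One "episode" of comments written by a single author.
texts = ["first comment", "second comment", "third comment"]
batch = tokenizer(texts, padding=True, truncation=True, max_length=64, return_tensors="pt")

# forward() expects (batch, episode_length, seq_len); add the batch axis.
input_ids = batch["input_ids"].unsqueeze(0)
attention_mask = batch["attention_mask"].unsqueeze(0)

with torch.no_grad():
    author_embedding = model(input_ids, attention_mask)  # (1, embedding_size)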