YixuanWeng committed on
Commit
095eca9
1 Parent(s): ee37659

Delete readme.md

Files changed (1)
  1. readme.md +0 -76
readme.md DELETED
@@ -1,76 +0,0 @@
# Multilingual SimCSE

#### A contrastive learning model trained with parallel language pairs

##### By using parallel sentence pairs in different languages, text is mapped into the same vector space during SimCSE-style pre-training.

##### First, the pre-trained parameters of [mDeBERTa](https://huggingface.co/microsoft/mdeberta-v3-base) are loaded, and pre-training is then carried out on the [CCMatrix](https://github.com/facebookresearch/LASER/tree/main/tasks/CCMatrix) dataset.
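
A minimal sketch of that initialization step, using the public checkpoint linked above (the variable names are only illustrative):

```
from transformers import AutoModel, AutoTokenizer

# Initialize the encoder from the public mDeBERTa-v3 checkpoint before contrastive pre-training
model = AutoModel.from_pretrained('microsoft/mdeberta-v3-base')
tokenizer = AutoTokenizer.from_pretrained('microsoft/mdeberta-v3-base')
```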

##### Training data: 100 million parallel pairs

##### Training equipment: 4 × 3090


## Pipeline Code

```
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained('WENGSYX/Multilingual_SimCSE')
tokenizer = AutoTokenizer.from_pretrained('WENGSYX/Multilingual_SimCSE')

# Encode one English and one Chinese sentence, then mean-pool the token embeddings
word1 = tokenizer('Hello,world.', return_tensors='pt')
word2 = tokenizer('你好,世界', return_tensors='pt')
out1 = model(**word1).last_hidden_state.mean(1)
out2 = model(**word2).last_hidden_state.mean(1)

# Parallel sentences in different languages map to nearby vectors
print(F.cosine_similarity(out1, out2))
----------------------------------------------------
tensor([0.8758], grad_fn=<DivBackward0>)
```
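
The snippet above pools with a plain mean over all token positions, which also averages padding tokens once sentences are batched together. A masked variant that averages only real tokens could look like the sketch below; the `mean_pooling` helper is not part of this repository, just an illustration:

```
import torch

def mean_pooling(last_hidden_state, attention_mask):
    # Zero out padding positions, then average only over real tokens
    mask = attention_mask.unsqueeze(-1).float()
    summed = (last_hidden_state * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)
    return summed / counts

# Example: batch the two sentences with padding and pool with the mask
batch = tokenizer(['Hello,world.', '你好,世界'], padding=True, return_tensors='pt')
emb = mean_pooling(model(**batch).last_hidden_state, batch['attention_mask'])
```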


## Training Code

```
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer, AdamW

device = 'cuda'
model = AutoModel.from_pretrained('WENGSYX/Multilingual_SimCSE').to(device)
tokenizer = AutoTokenizer.from_pretrained('WENGSYX/Multilingual_SimCSE')
optimizer = AdamW(model.parameters(), lr=1e-5)

def compute_loss(y_pred, t=0.05, device="cuda"):
    # Sentences are ordered [a1, b1, a2, b2, ...]: each even index i is paired with i+1, each odd index with i-1
    idxs = torch.arange(0, y_pred.shape[0], device=device)
    y_true = idxs + 1 - idxs % 2 * 2
    # Pairwise cosine similarities; mask the diagonal so a sentence is never its own positive
    similarities = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
    similarities = similarities - torch.eye(y_pred.shape[0], device=device) * 1e12
    similarities = similarities / t
    # In-batch contrastive loss: the parallel translation is the positive, all other sentences are negatives
    loss = F.cross_entropy(similarities, y_true)
    return torch.mean(loss)

# Each entry is a parallel pair in two different languages,
# e.g. an Italian sentence and its Russian translation ("He thinks that white represents purity.")
wordlist = [['Hello,world', '你好,世界'],
            ['Pensa che il bianco rappresenti la purezza.', 'Он думает, что белые символизируют чистоту.']]

input_ids, attention_mask = [], []
for x in wordlist:
    text1 = tokenizer(x[0], padding='max_length', truncation=True, max_length=512)
    input_ids.append(text1['input_ids'])
    attention_mask.append(text1['attention_mask'])
    text2 = tokenizer(x[1], padding='max_length', truncation=True, max_length=512)
    input_ids.append(text2['input_ids'])
    attention_mask.append(text2['attention_mask'])

input_ids = torch.tensor(input_ids, device=device)
attention_mask = torch.tensor(attention_mask, device=device)

# Mean-pool token embeddings to get one vector per sentence, then take one optimization step
output = model(input_ids=input_ids, attention_mask=attention_mask)
output = output.last_hidden_state.mean(1)
loss = compute_loss(output)
loss.backward()

optimizer.step()
optimizer.zero_grad()
```
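
The block above runs a single optimization step on a two-pair toy batch. Scaling it to the full parallel corpus essentially means repeating that step over batches; the sketch below is one straightforward way to do so, assuming `pair_list` is a Python list of `[sentence, translation]` pairs loaded from CCMatrix (the variable name and batch size are illustrative, not part of the original code):

```
# A minimal training loop over parallel pairs, reusing compute_loss from above
batch_size = 16

def encode_pairs(pairs):
    ids, mask = [], []
    for a, b in pairs:
        for text in (a, b):  # keep the [a1, b1, a2, b2, ...] ordering compute_loss expects
            enc = tokenizer(text, padding='max_length', truncation=True, max_length=512)
            ids.append(enc['input_ids'])
            mask.append(enc['attention_mask'])
    return torch.tensor(ids, device=device), torch.tensor(mask, device=device)

for start in range(0, len(pair_list), batch_size):
    input_ids, attention_mask = encode_pairs(pair_list[start:start + batch_size])
    output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state.mean(1)
    loss = compute_loss(output)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```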