F-Haru committed
Commit e67a6fb · Parent(s): 3ca32d9

Upload 8 files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+en-other_tab.txt filter=lfs diff=lfs merge=lfs -text
+ref.txt filter=lfs diff=lfs merge=lfs -text
+src.txt filter=lfs diff=lfs merge=lfs -text
+trg.txt filter=lfs diff=lfs merge=lfs -text
da_seikika.txt ADDED
The diff for this file is too large to render. See raw diff
 
distillation.py ADDED
@@ -0,0 +1,106 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Sat Jun 17 16:20:22 2023
+
+ @author: fujidai
+ """
+
+ import logging
+
+ from torch.utils.data import DataLoader
+ from sentence_transformers import SentenceTransformer, LoggingHandler, models, losses
+ from sentence_transformers.datasets import ParallelSentencesDataset
+
+ logging.basicConfig(format='%(asctime)s - %(message)s',
+                     datefmt='%Y-%m-%d %H:%M:%S',
+                     level=logging.INFO,
+                     handlers=[LoggingHandler()])
+ logger = logging.getLogger(__name__)
+
+ teacher_model_name = 'teacher model created with metrics_finetuning_teacher.py'  # Our monolingual teacher model, which we want to extend to multiple languages
+ student_model_name = 'student model created with metrics_finetuning_student.py'  # Multilingual base model we use to imitate the teacher model
+
+ max_seq_length = 128                 # Student model max. length for inputs (number of word pieces)
+ train_batch_size = 64                # Batch size for training
+ inference_batch_size = 64            # Batch size at inference
+ max_sentences_per_language = 500000  # Maximum number of parallel sentences for training
+ train_max_sentence_length = 250      # Maximum length (characters) for parallel training sentences
+
+ num_epochs = 100                     # Train for x epochs
+ num_warmup_steps = 10000             # Warmup steps
+
+ num_evaluation_steps = 1000          # Evaluate performance after every x steps
+ dev_sentences = 1000                 # Number of parallel sentences to be used for development
+
+ ######## Start the extension of the teacher model to multiple languages ########
+ logger.info("Load teacher model")
+ teacher_model = SentenceTransformer(teacher_model_name, device='mps')
+
+ logger.info("Create student model from scratch")
+ word_embedding_model = models.Transformer(student_model_name, max_seq_length=max_seq_length)
+ # Apply mean pooling to get one fixed-sized sentence vector
+ # (a models.Dense module could be inserted here to change the output dimensionality, e.g. to 768)
+ pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
+ student_model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device='mps')
+
+ print(teacher_model)
+ print(student_model)
+
+ train_data = ParallelSentencesDataset(student_model=student_model, teacher_model=teacher_model)
+ train_data.load_data('/en-other_tab.txt')  # One pair per line: English sentence TAB other-language sentence
+
+ train_dataloader = DataLoader(train_data, shuffle=True, batch_size=train_batch_size)
+ train_loss = losses.MSELoss(model=student_model)
+
+ # Train the model
+ student_model.fit(train_objectives=[(train_dataloader, train_loss)],
+                   epochs=num_epochs,
+                   warmup_steps=num_warmup_steps,
+                   evaluation_steps=num_evaluation_steps,
+                   optimizer_params={'lr': 2e-5, 'eps': 1e-6},
+                   checkpoint_path='checkpoint-savename',
+                   checkpoint_save_steps=2000  # Adjust to the run at hand
+                   )
+
+ student_model.save('savename')
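
Note: ParallelSentencesDataset reads one tab-separated sentence pair per line and uses the teacher's embedding of the source sentence as the MSE target for both sides, so the student learns to map the translation onto the teacher's vector. A minimal sketch of the expected file format and of loading the distilled student afterwards (the file contents and the 'savename' path are illustrative, not the actual training data):

    # Expected en-other_tab.txt layout: English TAB other language, one pair per line
    with open('en-other_tab.txt', 'w', encoding='utf-8') as f:
        f.write('Hello.\tこんにちは。\n')
        f.write('Thank you.\tありがとう。\n')

    from sentence_transformers import SentenceTransformer
    student = SentenceTransformer('savename')  # directory written by student_model.save(...)
    emb = student.encode(['Hello.', 'こんにちは。'])
    print(emb.shape)  # (2, hidden_dim); distillation pulls the two rows together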
en-other_tab.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9fd96b89dfd2f0fdb933772cd5aa11b2bf89ed86dfba9a2fd4563bfea22fe22e
+ size 27508845
metrics_finetuning_student.py ADDED
@@ -0,0 +1,105 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Thu Aug 17 14:12:16 2023
+
+ @author: fujidai
+ """
+
+ from torch.utils.data import DataLoader
+ from sentence_transformers import SentenceTransformer, InputExample, losses, models
+
+ # Specify the base model
+ word_embedding_model = models.Transformer('/paraphrase-multilingual-mpnet-base-v2', max_seq_length=512)
+ pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
+ model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device='mps')
+ print(model)
+
+ with open('/da_seikika.txt', 'r') as f:  # negative en-ja cos_sim, one value per line
+     raberu_lines = f.read().splitlines()  # split at newlines into a list
+ data = [float(line) for line in raberu_lines]  # collect the negative en-ja cos_sim labels
+
+ with open('/src.txt', 'r') as f:  # TED English
+     left_lines = f.read().splitlines()
+
+ with open('/trg.txt', 'r') as f:  # pseudo-Japanese (TED English run through Google Translate)
+     right_lines = f.read().splitlines()
+
+ train_examples = []
+ for i in range(len(left_lines)):
+     pair = [left_lines[i], right_lines[i]]  # i-th sentence from each side
+     # (label = 1 - data[i] would give the positive cos_sim instead)
+     example = InputExample(texts=pair, label=data[i])  # the pair with its similarity label
+     train_examples.append(example)
+ print(len(train_examples))
+
+ train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=8)
+ # train_loss = losses.MarginMSELoss(model=model, similarity_fct=torch.nn.functional.cosine_similarity)
+ train_loss = losses.CosineSimilarityLoss(model)
+
+ # Tune the model
+ model.fit(train_objectives=[(train_dataloader, train_loss)],
+           epochs=100,
+           warmup_steps=100,
+           show_progress_bar=True,
+           checkpoint_path='checkpoint-savename',
+           checkpoint_save_steps=6699,  # how many iterations between checkpoints
+           save_best_model=True,
+           # optimizer_params={'lr': 2e-6},  # optionally lower the learning rate
+           )
+ model.save("savename")
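
Since CosineSimilarityLoss trains the cosine similarity of the two embeddings toward the label, a quick sanity check of the fine-tuned model is to compare its predicted similarity against a known label. A minimal sketch (the 'savename' path and the sentence pair are illustrative):

    from sentence_transformers import SentenceTransformer, util

    model = SentenceTransformer('savename')  # directory written by model.save(...)
    src = 'This is a test.'                  # stands in for a line of src.txt
    trg = 'これはテストです。'                  # stands in for the matching line of trg.txt
    emb = model.encode([src, trg], convert_to_tensor=True)
    print(util.cos_sim(emb[0], emb[1]).item())  # trained toward the label from da_seikika.txt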
metrics_finetuning_teacher.py ADDED
@@ -0,0 +1,105 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Thu Aug 17 14:12:16 2023
+
+ @author: fujidai
+ """
+
+ from torch.utils.data import DataLoader
+ from sentence_transformers import SentenceTransformer, InputExample, losses, models
+
+ # Specify the base model
+ word_embedding_model = models.Transformer('paraphrase-mpnet-base-v2', max_seq_length=512)
+ pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
+ model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device='mps')
+ print(model)
+
+ with open('/da_seikika.txt', 'r') as f:  # negative en-ja cos_sim, one value per line
+     raberu_lines = f.read().splitlines()  # split at newlines into a list
+ data = [float(line) for line in raberu_lines]  # collect the negative en-ja cos_sim labels
+
+ with open('/ref.txt', 'r') as f:  # TED English
+     left_lines = f.read().splitlines()
+
+ with open('/trg.txt', 'r') as f:  # pseudo-Japanese (TED English run through Google Translate)
+     right_lines = f.read().splitlines()
+
+ train_examples = []
+ for i in range(len(left_lines)):
+     pair = [left_lines[i], right_lines[i]]  # i-th sentence from each side
+     # (label = 1 - data[i] would give the positive cos_sim instead)
+     example = InputExample(texts=pair, label=data[i])  # the pair with its similarity label
+     train_examples.append(example)
+ print(len(train_examples))
+
+ train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=8)
+ # train_loss = losses.MarginMSELoss(model=model, similarity_fct=torch.nn.functional.cosine_similarity)
+ train_loss = losses.CosineSimilarityLoss(model)
+
+ # Tune the model
+ model.fit(train_objectives=[(train_dataloader, train_loss)],
+           epochs=100,
+           warmup_steps=100,
+           show_progress_bar=True,
+           checkpoint_path='checkpoint-savename',
+           checkpoint_save_steps=6699,  # how many iterations between checkpoints
+           save_best_model=True,
+           # optimizer_params={'lr': 2e-6},  # optionally lower the learning rate
+           )
+ model.save("savename")
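
This teacher script mirrors metrics_finetuning_student.py above, differing only in the base model ('paraphrase-mpnet-base-v2' instead of the multilingual one) and in reading '/ref.txt' rather than '/src.txt'. Because the training loop indexes the label and sentence lists in lockstep, the three inputs must be line-aligned; a minimal sanity-check sketch (paths mirror the ones hard-coded above):

    def read_lines(path):
        with open(path, 'r', encoding='utf-8') as f:
            return f.read().splitlines()

    labels = read_lines('/da_seikika.txt')
    refs = read_lines('/ref.txt')
    trgs = read_lines('/trg.txt')
    assert len(labels) == len(refs) == len(trgs), 'inputs must be line-aligned'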
ref.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a58f832b2017384ca7836311325d30f53554c3e6af82613bdfa9b9432d849be3
+ size 13617545
src.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71dc30710bc43bcb09d545a40c51d591083e7b8c79b619b74e090919ddcbe4e7
+ size 14587716
trg.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b082586b132398c03c6c84160466512a9a1f8de250ecdafa09ede38feaea1269
+ size 12921129