Upload 10 files
- distillation.py +108 -0
- finetune.py +104 -0
- pseudo-en-ja-100000-karanasi_09-04.txt +0 -0
- pseudo-english-sentence-100000-karanasi_09-04.txt +0 -0
- pseudo-english_english_100000_cos-sim-karanasi_09-04.txt +0 -0
- pseudo-ja-en-100000-karanasi_09-04.txt +0 -0
- pseudo-japanese-sentence-100000-karanasi_09-04.txt +0 -0
- pseudo-pseudo-english_english_100000_cos-sim-karanasi_09-04.txt +0 -0
- pseudo-pseudo_en-ja-100000-karanasi_09-04.txt +0 -0
- pseudo-pseudo_ja-en-100000-karanasi_09-04.txt +0 -0
distillation.py
ADDED
@@ -0,0 +1,108 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 17 16:20:22 2023

@author: fujidai
"""


from sentence_transformers import SentenceTransformer, LoggingHandler, models, evaluation, losses
import torch
from torch.utils.data import DataLoader
from sentence_transformers.datasets import ParallelSentencesDataset
from datetime import datetime

import os
import logging
import sentence_transformers.util
import csv
import gzip
from tqdm.autonotebook import tqdm
import numpy as np
import zipfile
import io

logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)


teacher_model_name = '/Users/fujidai/sinTED/paraphrase-mpnet-base-v2'  # Our monolingual teacher model that we want to extend to multiple languages
#teacher_model_name = '/Users/fujidai/TED2020_data/tisikizyouryu/bert-large-nli-mean-tokens'  # Alternative monolingual teacher model

student_model_name = '/Users/fujidai/dataseigen/09-MarginMSELoss-finetuning-7-5'  # Multilingual base model we use to imitate the teacher model

max_seq_length = 128                 # Student model max. length for inputs (number of word pieces)
train_batch_size = 128               # Batch size for training
inference_batch_size = 128           # Batch size at inference
max_sentences_per_language = 500000  # Maximum number of parallel sentences for training
train_max_sentence_length = 250      # Maximum length (characters) for parallel training sentences

num_epochs = 3            # Train for x epochs
num_warmup_steps = 10000  # Warmup steps

num_evaluation_steps = 1000  # Evaluate performance after every xxxx steps
dev_sentences = 1000         # Number of parallel sentences to be used for development


######## Start the extension of the teacher model to multiple languages ########
logger.info("Load teacher model")
teacher_model = SentenceTransformer(teacher_model_name, device='mps')


logger.info("Create student model from scratch")

word_embedding_model = models.Transformer(student_model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed-sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())  # a Dense module could be appended here to change the output dimensionality (e.g. to 768)
student_model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device='mps')

print(teacher_model)
print(student_model)


train_data = ParallelSentencesDataset(student_model=student_model, teacher_model=teacher_model)
train_data.load_data('/Users/fujidai/dataseigen/09-04_09-04.txt')  # Japanese and English sentences joined by a tab
#train_data.load_data('/Users/fujidai/TED2020_data/wmt21/output-100.txt')  # Japanese and English sentences joined by a tab

#train_data.load_data('/Users/fujidai/TED2020_data/data/tuikazumi/en-ja/TED2020.en-ja.en')
train_dataloader = DataLoader(train_data, shuffle=True, batch_size=train_batch_size)
train_loss = losses.MSELoss(model=student_model)

print(train_data)


#50000_all-MiniLM-L6-v2__paraphrase-distilroberta-base-v2_epoch-1

# Train the model
print('az')  # debug marker
student_model.fit(train_objectives=[(train_dataloader, train_loss)],
                  epochs=num_epochs,
                  #device=device,
                  warmup_steps=num_warmup_steps,
                  evaluation_steps=num_evaluation_steps,
                  #output_path='best_paraphrase-mpnet-base-v2__xlm-roberta-base_epoch-3',
                  #save_best_model=True,
                  optimizer_params={'lr': 2e-5, 'eps': 1e-6},
                  checkpoint_path='paraphrase-mpnet-base-v2_09-MarginMSELoss-finetuning-7-5_2',
                  checkpoint_save_steps=820
                  )

student_model.save('paraphrase-mpnet-base-v2_09-MarginMSELoss-finetuning-7-5')
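
As a quick check of what the distilled student is supposed to achieve, here is a minimal sketch (not part of this commit; the model path and sentences are only assumptions) that loads the saved student and compares a parallel English/Japanese pair with cosine similarity:

from sentence_transformers import SentenceTransformer, util

# Hypothetical sanity check: after distillation, parallel sentences in different
# languages should map to nearby points in the embedding space.
student = SentenceTransformer('paraphrase-mpnet-base-v2_09-MarginMSELoss-finetuning-7-5')
embeddings = student.encode(['This is a test sentence.', 'これはテスト文です。'], convert_to_tensor=True)
print(util.cos_sim(embeddings[0], embeddings[1]))  # use util.pytorch_cos_sim on older sentence-transformers releases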
finetune.py
ADDED
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 08:47:31 2023

@author: fujidai
"""


import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses, models


word_embedding_model = models.Transformer('/Users/fujidai/sinTED/xlm-roberta-base', max_seq_length=510)  # specify the base model
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
#dense_model = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(), out_features=16)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device='mps')
print(model)


with open('/Users/fujidai/dataseigen/up/pseudo-pseudo-english_english_100000_cos-sim-karanasi_09-04.txt', 'r') as f:  # negative en-ja cos_sim
    raberu = f.read()
raberu_lines = raberu.splitlines()  # split on newlines into a list
data = []
for i in range(len(raberu_lines)):
    data.append(float(raberu_lines[i]))  # store the negative en-ja cos_sim values in data

with open('/Users/fujidai/dataseigen/up/pseudo-pseudo_en-ja-100000-karanasi_09-04.txt', 'r') as f:  # TED English
    left = f.read()
left_lines = left.splitlines()

with open('/Users/fujidai/dataseigen/up/pseudo-pseudo_ja-en-100000-karanasi_09-04.txt', 'r') as f:  # TED Japanese
    senter = f.read()
senter_lines = senter.splitlines()

with open('/Users/fujidai/dataseigen/up/pseudo-japanese-sentence-100000-karanasi_09-04.txt', 'r') as f:  # pseudo Japanese (TED English passed through Google Translate as a pseudo corpus)
    right = f.read()
right_lines = right.splitlines()  # split on newlines into a list


train_examples = []
for i in range(len(left_lines)):
    pair = []
    pair.append(left_lines[i])    # add line i from left_lines to texts
    pair.append(senter_lines[i])
    pair.append(right_lines[i])   # add line i from right_lines to texts
    example = InputExample(texts=pair, label=1 - data[i])  # add texts together with the label
    # the 1 in label=1-data[i] is the positive cos_sim
    train_examples.append(example)  # collect the examples used for training

with open('/Users/fujidai/dataseigen/down/pseudo-english_english_100000_cos-sim-karanasi_09-04.txt', 'r') as f:  # negative ja-en cos_sim
    raberu2 = f.read()
raberu2_lines = raberu2.splitlines()  # split on newlines into a list
data2 = []
for i in range(len(raberu2_lines)):
    data2.append(float(raberu2_lines[i]))  # store the negative ja-en cos_sim values in data2

with open('/Users/fujidai/dataseigen/down/pseudo-ja-en-100000-karanasi_09-04.txt', 'r') as f:  # TED Japanese
    left2 = f.read()
left2_lines = left2.splitlines()

with open('/Users/fujidai/dataseigen/down/pseudo-en-ja-100000-karanasi_09-04.txt', 'r') as f:  # TED English
    senter2 = f.read()
senter2_lines = senter2.splitlines()

with open('/Users/fujidai/dataseigen/down/pseudo-english-sentence-100000-karanasi_09-04.txt', 'r') as f:  # pseudo English (TED Japanese passed through Google Translate as a pseudo corpus)
    right2 = f.read()
right2_lines = right2.splitlines()  # split on newlines into a list

for i in range(len(left2_lines)):
    pair = []
    pair.append(left2_lines[i])   # add line i from left2_lines to texts
    pair.append(senter2_lines[i])
    pair.append(right2_lines[i])  # add line i from right2_lines to texts
    example = InputExample(texts=pair, label=1 - data2[i])  # add texts together with the label
    # the 1 in label=1-data2[i] is the positive cos_sim
    train_examples.append(example)  # collect the examples used for training


device = torch.device('mps')
#print(device)


train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=8)
train_loss = losses.MarginMSELoss(model=model, similarity_fct=F.cosine_similarity)


# Tune the model
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=3, warmup_steps=1000, show_progress_bar=True,
          #output_path='完成2best-6-30',
          checkpoint_path='checkpoint_savename', checkpoint_save_steps=2300,  # how many iterations between checkpoint saves
          save_best_model=True)  # checkpoint_save_total_limit=5,
model.save("savename")
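
For reference, a minimal sketch (illustrative only, not from this repository) of what MarginMSELoss with similarity_fct=F.cosine_similarity optimizes per triplet: the predicted margin sim(anchor, positive) - sim(anchor, negative) is regressed with MSE onto the gold label, which finetune.py sets to 1 - (negative cos_sim):

import torch
import torch.nn.functional as F

def margin_mse(anchor, positive, negative, gold_margin):
    # predicted margin between the positive and negative cosine similarities
    pred = F.cosine_similarity(anchor, positive, dim=-1) - F.cosine_similarity(anchor, negative, dim=-1)
    return F.mse_loss(pred, gold_margin)

# Toy embeddings standing in for the student model's sentence vectors.
anchor = torch.randn(4, 768)
positive = anchor + 0.05 * torch.randn(4, 768)  # close to the anchor
negative = torch.randn(4, 768)                  # unrelated
gold = torch.full((4,), 0.8)                    # e.g. 1 - negative cos_sim, as in finetune.py
print(margin_mse(anchor, positive, negative, gold))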
pseudo-en-ja-100000-karanasi_09-04.txt
ADDED
The diff for this file is too large to render.

pseudo-english-sentence-100000-karanasi_09-04.txt
ADDED
The diff for this file is too large to render.

pseudo-english_english_100000_cos-sim-karanasi_09-04.txt
ADDED
The diff for this file is too large to render.

pseudo-ja-en-100000-karanasi_09-04.txt
ADDED
The diff for this file is too large to render.

pseudo-japanese-sentence-100000-karanasi_09-04.txt
ADDED
The diff for this file is too large to render.

pseudo-pseudo-english_english_100000_cos-sim-karanasi_09-04.txt
ADDED
The diff for this file is too large to render.

pseudo-pseudo_en-ja-100000-karanasi_09-04.txt
ADDED
The diff for this file is too large to render.

pseudo-pseudo_ja-en-100000-karanasi_09-04.txt
ADDED
The diff for this file is too large to render.
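
finetune.py reads these uploaded files line by line and pairs them up by line index, so the scripts assume every file in a group is line-aligned: one sentence (or one cosine-similarity float) per line, with the same number of lines in each file. A small sketch (paths shortened; this check is not part of the commit) for verifying that alignment before training:

# Hypothetical alignment check for one group of files consumed by finetune.py.
paths = [
    'pseudo-pseudo_en-ja-100000-karanasi_09-04.txt',                    # TED English
    'pseudo-pseudo_ja-en-100000-karanasi_09-04.txt',                    # TED Japanese
    'pseudo-japanese-sentence-100000-karanasi_09-04.txt',               # pseudo Japanese (Google-translated)
    'pseudo-pseudo-english_english_100000_cos-sim-karanasi_09-04.txt',  # negative cos_sim labels, one float per line
]
line_counts = []
for path in paths:
    with open(path, 'r') as f:
        line_counts.append(len(f.read().splitlines()))
assert len(set(line_counts)) == 1, f'files are not line-aligned: {line_counts}'
print('all files contain', line_counts[0], 'lines')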