F-Haru committed on
Commit 7775414
1 Parent(s): 5e90942

Upload distillation.py

Files changed (1)
  1. distillation.py +110 -0
distillation.py ADDED
@@ -0,0 +1,110 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Sat Jun 17 16:20:22 2023
+
+ @author: fujidai
+ """
+
+ import logging
+
+ from sentence_transformers import SentenceTransformer, LoggingHandler, models, losses
+ from sentence_transformers.datasets import ParallelSentencesDataset
+ from torch.utils.data import DataLoader
+
+ logging.basicConfig(format='%(asctime)s - %(message)s',
+                     datefmt='%Y-%m-%d %H:%M:%S',
+                     level=logging.INFO,
+                     handlers=[LoggingHandler()])
+ logger = logging.getLogger(__name__)
+
+
+ teacher_model_name = 'teacher_finetune.py で作成したモデル'  # the model created by teacher_finetune.py: our monolingual teacher, which we want to extend to multiple languages
+ #teacher_model_name = '/Users/fujidai/TED2020_data/tisikizyouryu/bert-large-nli-mean-tokens'
+
+ student_model_name = '完成2-MarginMSELoss-finetuning-6-30'  # multilingual base model that we train to imitate the teacher
+ #student_model_name = '/Users/fujidai/sinTED/完成2-MarginMSELoss-finetuning-6-30'
+
+
+ max_seq_length = 128                 # student model max. input length (number of word pieces)
+ train_batch_size = 64                # batch size for training
+ inference_batch_size = 64            # batch size at inference
+ max_sentences_per_language = 500000  # maximum number of parallel sentences for training
+ train_max_sentence_length = 250      # maximum length (characters) of parallel training sentences
+
+ num_epochs = 100                     # train for x epochs
+ num_warmup_steps = 10000             # warmup steps
+
+ num_evaluation_steps = 1000          # evaluate performance after every x steps
+ dev_sentences = 1000                 # number of parallel sentences used for development
+
+
+ ######## Start the extension of the teacher model to multiple languages ########
+ logger.info("Load teacher model")
+ teacher_model = SentenceTransformer(teacher_model_name, device='mps')
+
+ logger.info("Create student model from scratch")
+ word_embedding_model = models.Transformer(student_model_name, max_seq_length=max_seq_length)
+ # Apply mean pooling to get one fixed-size sentence vector.
+ # (A models.Dense layer could be inserted after pooling to change the output
+ # dimensionality, e.g. to 768; see the sketch after this diff.)
+ pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
+ student_model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device='mps')
+
+ print(teacher_model)
+ print(student_model)
+
+
+ # Training data: one pair per line, Japanese and English joined by a tab
+ # (see the file-format sketch after this diff)
+ train_data = ParallelSentencesDataset(student_model=student_model, teacher_model=teacher_model)
+ train_data.load_data('output-100000-karanasi.txt')
+ #train_data.load_data('/Users/fujidai/TED2020_data/wmt21/output-100.txt')
+ #train_data.load_data('/Users/fujidai/TED2020_data/data/tuikazumi/en-ja/TED2020.en-ja.en')
+
+ train_dataloader = DataLoader(train_data, shuffle=True, batch_size=train_batch_size)
+ train_loss = losses.MSELoss(model=student_model)  # student embeddings are regressed onto the teacher's embeddings
+
+ print(train_data)
+
+
+ # Train the model
+ logger.info("Start training")
+ student_model.fit(train_objectives=[(train_dataloader, train_loss)],
+                   epochs=num_epochs,
+                   warmup_steps=num_warmup_steps,
+                   evaluation_steps=num_evaluation_steps,
+                   #output_path='best_paraphrase-mpnet-base-v2__xlm-roberta-base_epoch-3',
+                   #save_best_model=True,
+                   optimizer_params={'lr': 2e-5, 'eps': 1e-6},
+                   checkpoint_path='checkpoint_savename',
+                   checkpoint_save_steps=2344)
+
+ student_model.save('savename')
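
For reference, ParallelSentencesDataset.load_data expects a tab-separated text file: the first column is a sentence in the teacher's language, and each further column is a translation of it. The training file output-100000-karanasi.txt above is such a file. A minimal sketch of producing one, assuming two hypothetical line-aligned files en.txt and ja.txt (one sentence per line each):

# Minimal sketch: build a tab-separated parallel file for ParallelSentencesDataset.
# 'en.txt' and 'ja.txt' are hypothetical line-aligned files; the real training
# file output-100000-karanasi.txt was produced elsewhere.
with open('en.txt', encoding='utf-8') as f_en, \
     open('ja.txt', encoding='utf-8') as f_ja, \
     open('parallel.tsv', 'w', encoding='utf-8') as f_out:
    for en, ja in zip(f_en, f_ja):
        en, ja = en.strip(), ja.strip()
        if en and ja:
            # First column: the teacher's language (English here); further
            # columns: translations the student should map to the same vector.
            f_out.write(f"{en}\t{ja}\n")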
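The pooling comment in the script mentions using a Dense layer to change the dimensionality to 768. MSELoss needs the student's sentence vectors to have the same size as the teacher's, so a projection is required when the two base models differ in width. A minimal sketch with sentence-transformers' models.Dense, where the 768 target is an assumption carried over from the original comment:

import torch.nn as nn
from sentence_transformers import SentenceTransformer, models

student_model_name = '完成2-MarginMSELoss-finetuning-6-30'  # same base model as in the script
max_seq_length = 128

word_embedding_model = models.Transformer(student_model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
# Project the pooled vector to the teacher's dimension (768 is the assumption
# from the original comment) so MSELoss can compare the two vectors directly.
dense_model = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(),
                           out_features=768,
                           activation_function=nn.Tanh())
student_model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dense_model],
                                    device='mps')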
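After training, one way to sanity-check the distilled student is to compare its embedding of a Japanese sentence against the teacher's embedding of the English counterpart; if distillation worked, the cosine similarity should be high. A hedged sketch (the sentence pair is illustrative, and it assumes the two models produce vectors of the same dimensionality):

from sentence_transformers import SentenceTransformer, util

teacher_model_name = 'teacher_finetune.py で作成したモデル'  # same teacher as in the script

student = SentenceTransformer('savename', device='mps')  # output of student_model.save above
teacher = SentenceTransformer(teacher_model_name, device='mps')

en = "Machine translation has improved rapidly."  # illustrative sentence pair
ja = "機械翻訳は急速に進歩した。"

emb_teacher = teacher.encode(en, convert_to_tensor=True)
emb_student = student.encode(ja, convert_to_tensor=True)

# After successful distillation, the student's Japanese vector should lie
# close to the teacher's English vector.
print(util.cos_sim(emb_teacher, emb_student).item())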