#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 08:47:31 2023
@author: fujidai
"""
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses, models
# Specify the base model (local XLM-RoBERTa checkpoint)
word_embedding_model = models.Transformer('/Users/fujidai/sinTED/xlm-roberta-base', max_seq_length=510)
# Default mean pooling over token embeddings gives a fixed-size sentence embedding
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
#dense_model = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(), out_features=16)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device='mps')
print(model)
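# Optional sanity check (illustrative only; the sentences below are assumed,
# not part of the training data): embed a pair and inspect its cosine
# similarity before fine-tuning.
# embeddings = model.encode(["Hello world.", "こんにちは世界。"], convert_to_tensor=True)
# print(F.cosine_similarity(embeddings[0:1], embeddings[1:2]))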
with open('/Users/fujidai/dataseigen/up/pseudo-pseudo-english_english_100000_cos-sim-karanasi_09-04.txt', 'r') as f:  # negative en-ja cos_sim
    raberu = f.read()
raberu_lines = raberu.splitlines()  # split on newlines into a list
data = []
for i in range(len(raberu_lines)):
    data.append(float(raberu_lines[i]))  # store the negative en-ja cos_sim values in data
with open('/Users/fujidai/dataseigen/up/pseudo-pseudo_en-ja-100000-karanasi_09-04.txt', 'r') as f:  # TED English
    left = f.read()
left_lines = left.splitlines()
with open('/Users/fujidai/dataseigen/up/pseudo-pseudo_ja-en-100000-karanasi_09-04.txt', 'r') as f:  # TED Japanese
    senter = f.read()
senter_lines = senter.splitlines()
with open('/Users/fujidai/dataseigen/up/pseudo-japanese-sentence-100000-karanasi_09-04.txt', 'r') as f:  # pseudo Japanese (pseudo corpus made by running the TED English through Google Translate)
    right = f.read()
right_lines = right.splitlines()  # split on newlines into a list
train_examples = []
for i in range(len(left_lines)):
    pair = []
    pair.append(left_lines[i])    # i-th line of left_lines (query text for MarginMSELoss)
    pair.append(senter_lines[i])  # i-th line of senter_lines (positive text)
    pair.append(right_lines[i])   # i-th line of right_lines (negative text)
    # the 1 in label=1-data[i] is the (assumed perfect) positive cos_sim
    example = InputExample(texts=pair, label=1 - data[i])  # labeled triplet
    train_examples.append(example)  # add to the training set
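# Worked example of the label: if the stored negative-pair cos_sim is 0.9,
# the margin label is 1 - 0.9 = 0.1; a less similar negative (e.g. 0.3)
# yields a larger target margin of 0.7.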
with open('/Users/fujidai/dataseigen/down/pseudo-english_english_100000_cos-sim-karanasi_09-04.txt', 'r') as f:  # negative ja-en cos_sim
    raberu2 = f.read()
raberu2_lines = raberu2.splitlines()  # split on newlines into a list
data2 = []
for i in range(len(raberu2_lines)):
    data2.append(float(raberu2_lines[i]))  # store the negative ja-en cos_sim values in data2
with open('/Users/fujidai/dataseigen/down/pseudo-ja-en-100000-karanasi_09-04.txt', 'r') as f:  # TED Japanese
    left2 = f.read()
left2_lines = left2.splitlines()
with open('/Users/fujidai/dataseigen/down/pseudo-en-ja-100000-karanasi_09-04.txt', 'r') as f:  # TED English
    senter2 = f.read()
senter2_lines = senter2.splitlines()
with open('/Users/fujidai/dataseigen/down/pseudo-english-sentence-100000-karanasi_09-04.txt', 'r') as f:  # pseudo English (pseudo corpus made by running the TED Japanese through Google Translate)
    right2 = f.read()
right2_lines = right2.splitlines()  # split on newlines into a list
for i in range(len(left2_lines)):
    pair = []
    pair.append(left2_lines[i])    # i-th line of left2_lines (query text)
    pair.append(senter2_lines[i])  # i-th line of senter2_lines (positive text)
    pair.append(right2_lines[i])   # i-th line of right2_lines (negative text)
    # the 1 in label=1-data2[i] is the (assumed perfect) positive cos_sim
    example = InputExample(texts=pair, label=1 - data2[i])  # labeled triplet
    train_examples.append(example)  # add to the same training set
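# train_examples now holds triplets from both directions: EN-anchored
# (TED English / TED Japanese / pseudo Japanese) and JA-anchored
# (TED Japanese / TED English / pseudo English).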
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=8)
train_loss = losses.MarginMSELoss(model=model, similarity_fct=F.cosine_similarity)
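# MarginMSELoss regresses the score difference onto the label: with
# sim = F.cosine_similarity, the loss is
#   MSE( sim(query, positive) - sim(query, negative), label )
# so each label above is the target similarity margin between the true
# pair and its pseudo-translated negative.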
# Fine-tune the model
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=3,
          warmup_steps=1000, show_progress_bar=True,
          #output_path='完成2best-6-30',
          checkpoint_path='checkpoint_savename',
          checkpoint_save_steps=2300,  # how many training steps between checkpoint saves
          save_best_model=True)  #checkpoint_save_total_limit=5,
model.save("savename")