import pandas as pd
import torch
from sentence_transformers import SentenceTransformer, SentencesDataset, InputExample, evaluation, losses
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
file_path = '../data/dataset.xlsx'
model_path = './out/bert_cls.pkl'

# Load the labelled sentence pairs and map the textual grades to soft
# similarity scores ("4分" -> 0.99, "1分" -> 0.01). Any other grade value is
# mapped to NaN by Series.map and must be filtered out below.
data_df = pd.read_excel(io=file_path)
score_col = '相似度分数\n（初版和终板）'
data_df[score_col] = data_df[score_col].map({"4分": 0.99, "1分": 0.01})  # 替换

texts_a = data_df['初版文本'].to_list()
texts_b = data_df['终版文本'].to_list()
labels = data_df[score_col].to_list()

sentences_pairs, sentences_labels = [], []
for i, (a, b, label) in enumerate(zip(texts_a, texts_b, labels)):
    # Keep only rows where both texts are strings and the label is a valid
    # score. NOTE: unmapped grades become NaN, and NaN *is* a float, so an
    # isinstance() check alone would let NaN labels leak into training —
    # the explicit pd.notna() guard is the actual filter for those rows.
    if isinstance(a, str) and isinstance(b, str) \
            and isinstance(label, float) and pd.notna(label):
        # Strip line breaks / tabs so each side is a single-line sentence.
        a = a.replace('\n', '').replace('\t', '').replace('\r', '')
        b = b.replace('\n', '').replace('\t', '').replace('\r', '')
        sentences_pairs.append([a, b])
        sentences_labels.append(label)
    else:
        print(f"去除第{i}处数据")

# Random train/eval split (default 75/25).
train_features, test_features, train_targets, test_targets = train_test_split(sentences_pairs, sentences_labels)
train_size, eval_size = len(train_targets), len(test_targets)

# Wrap each training pair in an InputExample, the record type consumed by
# SentencesDataset / SentenceTransformer.fit.
train_data = [
    InputExample(texts=[pair[0], pair[1]], label=target)
    for pair, target in zip(train_features, train_targets)
]

# EmbeddingSimilarityEvaluator expects two parallel sentence lists rather
# than a list of pairs, so split the held-out pairs column-wise.
test_features_A = [pair[0] for pair in test_features]
test_features_B = [pair[1] for pair in test_features]

# Load the multilingual MiniLM sentence encoder on the detected device.
# `device` is computed at the top of the script but was previously never
# used, so the model stayed on whatever device the library defaulted to;
# passing it explicitly makes the intended GPU placement take effect.
model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2', device=device)

# Correlation between predicted embedding similarity and the gold scores on
# the held-out pairs, computed every `evaluation_steps` batches during fit.
evaluator = evaluation.EmbeddingSimilarityEvaluator(test_features_A, test_features_B, test_targets)

# Define your train dataset, the dataloader and the train loss
train_dataset = SentencesDataset(train_data, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=64)
train_loss = losses.CosineSimilarityLoss(model)

# Fine-tune with cosine-similarity regression; checkpoints/results are
# written to output_path.
model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=20,
    warmup_steps=100,
    evaluator=evaluator,
    evaluation_steps=100,
    output_path='./out/sbt_model',
)
