from torch.utils.data import DataLoader
import torch.nn as nn
from sentence_transformers import SentenceTransformer, InputExample, losses
from sentence_transformers import models, evaluation
from preprocess import get_data, model_save_path, result_save_path, sen_length, checkpoints_path, epochs, data_name, \
    data_file
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Restrict this process to GPU 1; must be set before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
# Path to the pretrained backbone (Chinese RoBERTa-wwm-ext checkpoint on disk).
model_path = './model_hub/roberta-wwm-ext'

word_embedding_model = models.Transformer(model_path, max_seq_length=sen_length)  # max sequence length in tokens

# Pool token embeddings into one fixed-size sentence vector
# (default pooling mode — presumably mean pooling; confirm against the library version used).
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
# Dense projection head: 256-dim output with a Sigmoid activation.
# NOTE(review): the original comment claimed Tanh, but the code actually uses nn.Sigmoid().
dense_model = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(),
                           out_features=256, activation_function=nn.Sigmoid())

# Experiment variants kept for reference: alternative activations (Sigmoid/ReLU/LeakyReLU/Softplus)
# dense_model = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(),
#                            out_features=256, activation_function=nn.Sigmoid())

# and alternative output dimensions (128/512)
# dense_model = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(),
#                            out_features=128, activation_function=nn.Tanh())

# Assemble the full pipeline: transformer -> pooling -> dense head.
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dense_model])
# model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dense_model], device="cuda")
x_train, x_test, y_train, y_test = get_data(data_file)


def build_examples(pairs, labels):
    """Convert (sentence1, sentence2) pairs and labels into InputExample objects.

    Pairs where either sentence is not a string (e.g. NaN leaking in from a
    pandas read) are skipped silently, preserving the original best-effort
    filtering behaviour.

    :param pairs: iterable of 2-tuples/lists of sentences
    :param labels: iterable of similarity labels, cast to float
    :return: list of sentence_transformers.InputExample
    """
    return [
        InputExample(texts=[s1, s2], label=float(label))
        for (s1, s2), label in zip(pairs, labels)
        if isinstance(s1, str) and isinstance(s2, str)
    ]


# Wrap both splits in the InputExample format expected by sentence-transformers.
# (Previously two copy-pasted loops — now one shared helper.)
train_examples = build_examples(x_train, y_train)
test_examples = build_examples(x_test, y_test)

# Mini-batch loader over the training pairs; batch size was tuned among 4/8/16/32.
batch_size = 8
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=batch_size)

# Training objective: cosine similarity between paired embeddings vs. the label.
cosine_loss = losses.CosineSimilarityLoss(model)

# Supervised evaluator on the held-out pairs; writes its metrics to CSV
# (presumably correlation between predicted similarity and labels — confirm
# against the EmbeddingSimilarityEvaluator docs for this library version).
dev_evaluator = evaluation.EmbeddingSimilarityEvaluator.from_input_examples(
    test_examples, write_csv=True,
)

model.fit(
    train_objectives=[(train_dataloader, cosine_loss)],
    epochs=epochs,
    evaluator=dev_evaluator,
    evaluation_steps=215,
    warmup_steps=100,
    save_best_model=True,
    output_path=model_save_path,
    # checkpoint_path=checkpoints_path,
    # checkpoint_save_steps=215,
)
