from sentence_transformers import SentenceTransformer, SentencesDataset, InputExample, evaluation, losses

from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
import os
import math
def cosine_distance(a, b):
    """Return the cosine distance (1 - cosine similarity) between embeddings.

    Parameters
    ----------
    a, b : np.ndarray
        Either two 1-D vectors of equal length, or two 2-D matrices whose
        rows are vectors sharing the same embedding dimension (row counts
        may differ).

    Returns
    -------
    float or np.ndarray
        A scalar for 1-D inputs; for 2-D inputs an (a_rows, b_rows) matrix
        where entry [i, j] is the distance between a[i] and b[j].

    Raises
    ------
    RuntimeError
        If the shapes are incompatible or ndim is not 1 or 2.
    """
    if a.ndim == 1:
        if a.shape != b.shape:
            raise RuntimeError("array {} shape not match {}".format(a.shape, b.shape))
        denom = np.linalg.norm(a) * np.linalg.norm(b)
    elif a.ndim == 2:
        # Only the embedding dimension has to agree; the old code demanded
        # identical full shapes, which rejected valid (n, d) vs (m, d) input.
        if b.ndim != 2 or a.shape[1] != b.shape[1]:
            raise RuntimeError("array {} shape not match {}".format(a.shape, b.shape))
        a_norm = np.linalg.norm(a, axis=1, keepdims=True)  # (n, 1)
        b_norm = np.linalg.norm(b, axis=1, keepdims=True)  # (m, 1)
        # BUG FIX: np.dot(a, b.T) is (n, m), so the denominator must be the
        # (n, m) outer product of the row norms.  The old `a_norm * b_norm`
        # broadcast (n,1)*(n,1) and divided every row by the wrong product,
        # corrupting all off-diagonal entries when norms differ.
        denom = a_norm * b_norm.T
    else:
        raise RuntimeError("array dimensions {} not right".format(a.ndim))
    similarity = np.dot(a, b.T) / denom
    return 1. - similarity
# Define the model: either a hub checkpoint (e.g. "distilbert-base-nli-mean-tokens")
# or, as here, the locally saved "Ko2CnModel" directory.
# model = SentenceTransformer("distilbert-base-nli-mean-tokens")
model = SentenceTransformer("Ko2CnModel")
# test = os.path.exists("test/Ko2CnModel")
# Read the training data.
# print( cosine_distance(model.encode("网络故障"),model.encode("通信故障")))
# print( cosine_distance(model.encode("网络故障"),model.encode("供电故障")))
train_data = pd.read_csv("train.csv")
# Hand-written sanity examples: label 1.0 = related texts, 0.0 = unrelated.
# NOTE(review): `train_examples` is never used below — training runs on
# `train_set` built from train.csv instead; also the last two entries are
# exact duplicates (likely a copy-paste slip). Confirm before deleting.
train_examples = [
    InputExample(texts=["网络故障", "与内部通信、外部通信以及通信设备有关的故障"], label=1.0),
    InputExample(texts=["网络故障", "外部供电、内部供电以及与供电设备有关的故障"], label=0.0),
    InputExample(texts=["与内部通信、外部通信以及通信设备有关的故障", "外部供电、内部供电以及与供电设备有关的故障"], label=0.0),
    InputExample(texts=["与内部通信、外部通信以及通信设备有关的故障", "外部供电、内部供电以及与供电设备有关的故障"], label=0.0)
]
# Build training pairs of fault descriptions from train.csv: two descriptions
# with the same fault type get a high similarity label (0.99), descriptions
# of different fault types get a low one (0.01).
train_set = []
dataset = []
for desc, handle, fault_type in zip(
        train_data["故障描述"], train_data["处理措施"], train_data["故障类型"]):
    dataset.append([desc, handle, fault_type])
total = len(dataset)
for left in range(total):
    desc_a = dataset[left][0]
    type_a = dataset[left][2]
    for right in range(left + 1, total):
        desc_b = dataset[right][0]
        type_b = dataset[right][2]
        # Skip any pair where either description is missing or empty.
        if pd.isna(desc_a) or pd.isna(desc_b) or len(desc_a) == 0 or len(desc_b) == 0:
            continue
        score = 0.99 if type_a == type_b else 0.01
        train_set.append(InputExample(texts=[desc_a, desc_b], label=score))
# Wrap the pair examples and fine-tune with a cosine-similarity objective.
fine_tune_dataset = SentencesDataset(train_set, model)
fine_tune_loader = DataLoader(fine_tune_dataset, shuffle=True, batch_size=64)
fine_tune_loss = losses.CosineSimilarityLoss(model)

# Tune the model and persist the result.
model.fit(
    train_objectives=[(fine_tune_loader, fine_tune_loss)],
    epochs=100,
    warmup_steps=100,
    output_path='./model/Ko2CnModel',
)

# Sanity check after training: the "network fault"/"communication fault" pair
# should come out closer than "network fault"/"power-supply fault".
print("对比训练结束：")
print(cosine_distance(model.encode("网络故障"), model.encode("通信故障")))
print(cosine_distance(model.encode("网络故障"), model.encode("供电故障")))