from sentence_transformers import SentenceTransformer, SentencesDataset, InputExample, evaluation, losses
from collections import defaultdict
import torch
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
import os
import math

def cosine_distance(a, b):
    """Return the cosine distance (1 - cosine similarity) between *a* and *b*.

    Supports two layouts:
      * 1-D: two vectors of equal length -> scalar distance.
      * 2-D: two (n, d) matrices -> (n, n) pairwise distance matrix
        (the shape check requires a.shape == b.shape).

    Raises:
        RuntimeError: if the shapes differ or ndim is not 1 or 2.
    """
    if a.shape != b.shape:
        raise RuntimeError("array {} shape not match {}".format(a.shape, b.shape))
    if a.ndim == 1:
        a_norm = np.linalg.norm(a)
        b_norm = np.linalg.norm(b)
        denom = a_norm * b_norm
    elif a.ndim == 2:
        a_norm = np.linalg.norm(a, axis=1, keepdims=True)  # shape (n, 1)
        b_norm = np.linalg.norm(b, axis=1, keepdims=True)  # shape (n, 1)
        # BUG FIX: np.dot(a, b.T) is (n, n), so the norm product must be
        # (n, 1) * (1, n) -> (n, n). The original `a_norm * b_norm` gave
        # (n, 1), dividing entry [i, j] by norm(a[i]) * norm(b[i]) instead
        # of norm(a[i]) * norm(b[j]).
        denom = a_norm * b_norm.T
    else:
        raise RuntimeError("array dimensions {} not right".format(a.ndim))
    similarity = np.dot(a, b.T) / denom
    return 1. - similarity
# Define the model: either from scratch or by loading a pre-trained one.
# model = SentenceTransformer("distilbert-base-nli-mean-tokens")
model = SentenceTransformer("Ko2CnModel")
# test = os.path.exists("test/Ko2CnModel")
# Read the training data.
# print( cosine_distance(model.encode("网络故障"),model.encode("通信故障")))
# print( cosine_distance(model.encode("网络故障"),model.encode("供电故障")))
train_data = pd.read_csv("train.csv")
# Define your train examples. You need more than just two examples...

# Containers: raw rows, per-class embedding lists, and per-class centroids.
train_set = []
dataset = []
train_dict = defaultdict(list)
centre_dict = defaultdict(list)
# Collect each row as [fault description, handling measure, fault type].
for desc, handle, label in zip(train_data["故障描述"],
                               train_data["处理措施"],
                               train_data["故障类型"]):
    dataset.append([desc, handle, label])
# Encode every fault description and bucket the vectors by fault-type label.
for desc, _handle, label in dataset:
    print(desc)
    if pd.isna(desc):
        continue  # rows with a missing description are skipped
    vec = torch.from_numpy(model.encode(desc))
    # Keep a leading singleton dim so the vectors can be stacked later.
    train_dict[label].append(vec.unsqueeze(0))

# Compute one centroid per fault type: the mean of its stacked embeddings,
# squeezed back down to a plain vector.
for label, vectors in train_dict.items():
    stacked = torch.stack(vectors, 0)
    centroid = torch.squeeze(torch.mean(stacked, 0))
    centre_dict[label].append(centroid)
# Evaluate nearest-centroid classification accuracy on the training data.
positive_nums = 0
evaluated = 0
for row in dataset:
    if pd.isna(row[0]):
        continue  # no description -> cannot classify
    embedding = torch.from_numpy(model.encode(row[0]))
    label = row[2]
    # Find the fault type whose centroid is CLOSEST (smallest cosine distance).
    best_dist = float("inf")
    best_label = ""
    for standard in centre_dict.keys():
        dist = cosine_distance(centre_dict[standard][0], embedding)
        # BUG FIX: the original kept the LARGEST distance, i.e. predicted the
        # most dissimilar class; nearest-centroid needs the smallest distance.
        if dist < best_dist:
            best_dist = dist
            best_label = standard
    row.append(best_label)  # row[3] = predicted fault type
    # BUG FIX: the original tested `dataset[i][2] == dataset[3]`, comparing a
    # label string against an entire row list (never equal). Compare the true
    # label against this row's prediction instead.
    if row[2] == row[3]:
        positive_nums += 1
    evaluated += 1
# Accuracy over the rows actually evaluated (the original divided by
# len(dataset), silently counting NaN-skipped rows as misclassified).
print(positive_nums / evaluated if evaluated else 0.0)



# Tune the model

# Report post-training distances for the two reference sentence pairs.
print("训练后：")
for left, right in (("网络故障", "通信故障"), ("网络故障", "供电故障")):
    print(cosine_distance(model.encode(left), model.encode(right)))