from tqdm import tqdm
from scipy.stats import spearmanr
import  jsonlines
import torch
from torch.utils.data import DataLoader, Dataset
from typing import Dict, List
from transformers import BertConfig, BertModel, BertTokenizer
import torch.nn as nn
from sentence_transformers import SentenceTransformer

import json
import numpy as np
MAXLEN = 128  # max sequence length — not referenced elsewhere in this file
POOLING = 'cls'  # pooling strategy flag — not referenced elsewhere in this file


#ROBERTA = '/home/lisong/work/ChatGLM-6B/simcse_V2/SimCSE-Chinese-Pytorch/roberta-wwm'
# ROBERTA = '../weights/m3e_model/'
# ROBERTA = './roberta-wwm-finetune'
# Path to the fine-tuned RoBERTa-wwm SentenceTransformer checkpoint to load.
ROBERTA = './roberta-wwm-finetune'
#model_path = '../SimCSE-Chinese-Pytorch/saved_model/simcse_sup.pt'
# Use the second CUDA device when available, otherwise fall back to CPU.
DEVICE = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
# Loaded once at import time; consumed by eval() below.
model = SentenceTransformer(ROBERTA,device = DEVICE)

def normalize(x, axis=-1):
    """Scale ``x`` to unit L2 norm along ``axis``.

    Args:
      x: a float torch tensor.
      axis: dimension along which to normalize (default: last).
    Returns:
      A tensor of the same shape as ``x`` whose slices along ``axis``
      have (approximately) unit Euclidean length.
    """
    # A tiny epsilon keeps the division finite for all-zero slices.
    norms = torch.norm(x, 2, axis, keepdim=True).expand_as(x)
    return x / (norms + 1e-12)

def eval(model, datas, save_path="./zhengzhuang_search_embedding_v20231116.npy",
         device=None) -> None:
    """Embed every item of *datas*, L2-normalize the rows, and save them.

    NOTE(review): the name shadows the builtin ``eval``; kept because the
    module-level caller uses it. The former spearman-correlation computation
    is disabled — this function only persists the embedding matrix.

    Args:
      model: SentenceTransformer-like object exposing ``.eval()`` and
        ``.encode(text, convert_to_tensor=True)``.
      datas: iterable of texts to embed, one row per item.
      save_path: destination ``.npy`` file for the normalized matrix
        (default keeps the original hard-coded path).
      device: torch device for the accumulator; defaults to the module-level
        DEVICE.
    Returns:
      None; the result is written to *save_path* as a (len(datas), dim) array.
    """
    if device is None:
        device = DEVICE  # module-level default (cuda:1 or cpu)
    model.eval()
    embeddings = torch.tensor([], device=device)
    with torch.no_grad():
        for i, data in enumerate(datas, start=1):
            # [dim] -> [1, dim] so rows can be stacked along dim 0.
            row = model.encode(data, convert_to_tensor=True)[None]
            embeddings = torch.cat((embeddings, row), dim=0)
            print(i, end="\r")  # lightweight progress indicator
        # Unit-normalize each row (same math as module normalize()).
        embeddings = embeddings / (
            torch.norm(embeddings, 2, -1, keepdim=True).expand_as(embeddings) + 1e-12)
        matrix = embeddings.cpu().numpy()
        print(matrix.shape)
        np.save(save_path, matrix)
    return None


def readjson(file_path):
    """Load and return the JSON document stored at *file_path* (UTF-8)."""
    with open(file_path, "r", encoding="utf-8") as handle:
        return json.load(handle)

# Script entry: embed the full symptom list and persist the matrix to disk.
# NOTE(review): this runs at import time — consider guarding with
# `if __name__ == "__main__":` so the module can be imported safely.
datas = readjson('./dataset/symptoms_all_v20231116.json')

eval(model, datas)