from tqdm import tqdm
from scipy.stats import spearmanr
import  jsonlines
import torch
from torch.utils.data import DataLoader, Dataset
from typing import Dict, List
from transformers import BertConfig, BertModel, BertTokenizer
import torch.nn as nn
from sentence_transformers import SentenceTransformer
# from my_py_toolkit.file.file_toolkit import *

import json
import numpy as np

def readjson(file_path):
    """Load and return the JSON document stored at ``file_path``.

    Args:
      file_path: path to a UTF-8 encoded JSON file.

    Returns:
      The deserialized Python object (typically a list or dict).
    """
    with open(file_path, "r", encoding="utf-8") as f:
        return json.load(f)

def get_device():
    """Return ``'cuda'`` when a CUDA device is available, else ``'cpu'``."""
    return 'cuda' if torch.cuda.is_available() else 'cpu'

def normalize(x, axis=-1):
    """Scale ``x`` to unit L2 norm along the given dimension.

    Args:
      x: pytorch tensor.
      axis: dimension along which to normalize (default: last).

    Returns:
      Tensor of the same shape as ``x``, L2-normalized along ``axis``.
    """
    # p=2 selects the Euclidean (L2) norm; the small epsilon keeps the
    # division finite when a row has zero norm.
    l2 = torch.norm(x, 2, axis, keepdim=True).expand_as(x)
    return 1. * x / (l2 + 1e-12)

def generate_embedding(model_path, data_json_path, save_path, model_cls=SentenceTransformer, device=get_device()) -> None:
    """Encode a JSON corpus and save the L2-normalized embedding matrix.

    Loads a model, encodes each entry of the JSON list stored at
    ``data_json_path`` one at a time, stacks the per-item embeddings into
    one matrix, normalizes each row to unit length, and writes the result
    to ``save_path`` via ``np.save``.

    Args:
      model_path: path to a SentenceTransformer-compatible model.
      data_json_path: JSON file holding a list of inputs to encode.
      save_path: destination ``.npy`` file for the embedding matrix.
      model_cls: model class; must accept ``(model_path, device=...)`` and
        expose ``eval()`` and ``encode(..., convert_to_tensor=True)``.
      device: torch device string. NOTE(review): the default is evaluated
        once at import time, not per call.
    """
    model = model_cls(model_path, device=device)
    model.eval()
    datas = readjson(data_json_path)

    with torch.no_grad():
        # Collect per-item embeddings in a Python list and concatenate
        # once at the end: repeated torch.cat inside the loop is O(n^2).
        chunks = []
        for count, data in enumerate(datas, start=1):
            emb = model.encode(data, convert_to_tensor=True)
            chunks.append(emb[None])  # add a leading batch axis
            print(count, end="\r")  # lightweight progress indicator
        if not chunks:
            # Empty corpus: save an empty array rather than crashing on
            # torch.cat with an empty sequence.
            np.save(save_path, np.empty((0,)))
            return
        embeddings = normalize(torch.cat(chunks, dim=0))
        matrix = embeddings.cpu().numpy()
        print(matrix.shape)
        np.save(save_path, matrix)

if __name__ == '__main__':
    # Finetuned SimCSE model plus the symptom-set corpus and output path.
    embedding_model = '/home/centos/ll/models/SimCSE-Chinese-Pytorch/roberta-wwm-finetune'
    corpus_json = './jibing_zhengzhuang_set.json'
    output_npy = './jibing_zhengzhuang_set.npy'
    generate_embedding(embedding_model, corpus_json, output_npy)
