import time
import torch
import numpy as np
from scipy.stats import pearsonr, spearmanr
from loguru import logger
import os
import json
from sent2vec import Sent2VecEmbeddings

def load_jsonl(json_path):
    """
    Load a JSON-lines file.

    Each line is parsed independently; lines that are not valid JSON are
    reported and skipped so one bad record does not abort the whole load.

    Args:
        json_path (str): Path to the .jsonl file.

    Returns:
        list: One parsed JSON object per valid line.
    """
    data = []
    with open(json_path, 'r', encoding='utf-8') as f:
        for json_str in f:
            # Catch only JSON parse failures; the original bare `except:`
            # would also swallow KeyboardInterrupt/SystemExit and real bugs.
            try:
                result = json.loads(json_str.strip('\n'))
                data.append(result)
            except json.JSONDecodeError:
                print('error', json_str)
    return data


def load_text_matching_test_data(path):
    """
    Load text-matching test data from a .jsonl, .json, or tab-separated
    text file.

    Args:
        path (str): Path to the data file.

    Returns:
        list: Tuples of (text_a, text_b, score), where score is an int.
            Empty when the file does not exist or no entry matches a
            known field schema.
    """
    data = []
    if not os.path.isfile(path):
        logger.warning(f'file not exist: {path}')
        return data

    def get_field_names(data_item):
        # Different datasets name their sentence-pair fields differently
        # (text1/text2, sentence1/sentence2, question_1/question_2, ...).
        # Add a new pair here when a new schema appears, otherwise the
        # entry is silently skipped.
        for a, b in (("text1", "text2"),
                     ("sentence1", "sentence2"),
                     ("question_1", "question_2")):
            if a in data_item and b in data_item:
                return a, b
        return None, None

    def collect(entries):
        # Shared extraction loop for the .jsonl and .json branches
        # (previously duplicated verbatim).
        for entry in entries:
            field1, field2 = get_field_names(entry)
            if not field1 or not field2:
                continue
            data.append((entry[field1], entry[field2], int(entry["label"])))

    if path.endswith('.jsonl'):
        collect(load_jsonl(path))
    elif path.endswith('.json'):
        # `json` is already imported at module level; the old nested
        # `import json` was redundant.
        with open(path, 'r', encoding='utf8') as f:
            collect(json.load(f))
    else:
        # Fallback: plain text, one tab-separated (text_a, text_b, score)
        # triple per line.
        with open(path, 'r', encoding='utf8') as f:
            for raw in f:
                parts = raw.strip().split('\t')
                if len(parts) != 3:
                    logger.warning(f'line size not match, pass: {parts}')
                    continue
                data.append((parts[0], parts[1], int(parts[2])))
    return data


def compute_spearmanr(x, y):
    """Return the Spearman rank correlation coefficient between x and y."""
    result = spearmanr(x, y)
    return result.correlation

def compute_pearsonr(x, y):
    """Return the Pearson correlation coefficient between x and y."""
    coefficient, _p_value = pearsonr(x, y)
    return coefficient

def cos_sim(a: torch.Tensor, b: torch.Tensor):
    """
    Compute pairwise cosine similarity between two batches of vectors.

    Accepts tensors or anything convertible via torch.tensor; a 1-D input
    is treated as a batch containing a single row.

    :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
    """
    a = a if isinstance(a, torch.Tensor) else torch.tensor(a)
    b = b if isinstance(b, torch.Tensor) else torch.tensor(b)

    if a.dim() == 1:
        a = a.unsqueeze(0)
    if b.dim() == 1:
        b = b.unsqueeze(0)

    # L2-normalizing each row first reduces the dot product to the cosine.
    a_unit = torch.nn.functional.normalize(a, p=2, dim=1)
    b_unit = torch.nn.functional.normalize(b, p=2, dim=1)
    return torch.mm(a_unit, b_unit.transpose(0, 1))

def calc_similarity_scores(model, sents1, sents2, labels):
    """
    Embed two parallel sentence lists, score each aligned pair by cosine
    similarity, and return the Spearman correlation against the labels.

    Args:
        model: Embedding model exposing embed_documents(list_of_str).
        sents1: First sentence of each pair.
        sents2: Second sentence of each pair (same length as sents1).
        labels: Gold score for each pair.

    Returns:
        float: Spearman correlation between labels and predicted sims.
    """
    t1 = time.time()
    e1 = model.embed_documents(sents1)
    e2 = model.embed_documents(sents2)
    spend_time = time.time() - t1
    s = cos_sim(e1, e2)
    # Only the aligned pairs (sents1[i], sents2[i]) matter, i.e. the
    # diagonal of the full similarity matrix; extracting it in one call
    # replaces the per-index Python loop.
    sims = torch.diagonal(s).cpu().numpy()
    labels = np.array(labels)
    spearman = compute_spearmanr(labels, sims)
    logger.debug(f'labels: {labels[:10]}')
    logger.debug(f'preds:  {sims[:10]}')
    logger.debug(f'Spearman: {spearman}')
    # Count without concatenating the two lists (the old len(sents1 + sents2)
    # built a throwaway list twice just to measure it).
    count = len(sents1) + len(sents2)
    logger.debug(
        f'spend time: {spend_time:.4f}, count:{count}, qps: {count / spend_time}')
    return spearman


# ---- evaluation entry point -------------------------------------------------
test_file = '/data/lxy/gaobao_data/db_v2.1/test.data1'
model_name = '/home/lxy/DPR/models/checkpoint-5391-epoch-1'

model = Sent2VecEmbeddings(model_name)
test_data = load_text_matching_test_data(test_file)

# Split the (text_a, text_b, score) triples into parallel lists for the
# embedding model and the correlation computation.
srcs = [pair[0] for pair in test_data]
trgs = [pair[1] for pair in test_data]
labels = [pair[2] for pair in test_data]

print('calculate spearman')
spearman = calc_similarity_scores(model, srcs, trgs, labels)
print(spearman)