import json

from sentence_transformers import SentenceTransformer
from tqdm import tqdm

from vector_search import VectorSearch
# Star import provides readjson / writejson / make_path_legal helpers.
from my_py_toolkit.file.file_toolkit import *

"""
向量检索度量

1 按集合进行度量
    1.1 对同一集合内，每个元素进行检索，度量检索结果中有多少属于当前集合。
    1.2 使用 top1、top5 度量指标
    1.3 考虑下集合元素个数 < topk 的情况
2 输入数据
    2.1 [ [xxx, xxx], ... ]
"""
# def writejson(data, file_path):
#   """"""
#   make_path_legal(file_path)
#   with open(file_path, "w", encoding="utf-8") as f:
#     f.write(json.dumps(data, ensure_ascii=False, indent=2))

# def readjson(file_path):
#   """"""
# #   make_path_legal(file_path)
#   with open(file_path, "r", encoding="utf-8") as f:
#     return json.load(f)

def get_score(text, val_set, search_res):
    """Score how well ``search_res`` recovers ``val_set``, ignoring the query itself.

    Args:
        text: the query element; it is removed from both sets so it never
            scores against itself.
        val_set: iterable of ground-truth elements (the query's group).
        search_res: iterable of retrieved elements (top-k search results).

    Returns:
        A float in [0, 1]: |truth ∩ retrieved| divided by
        min(len(truth without ``text``), len(search_res)), or 0 when the
        truth set is empty after removing ``text``.
    """
    truth = set(val_set) - {text}
    retrieved = set(search_res) - {text}

    # NOTE(review): the denominator deliberately uses the raw length of
    # `search_res` (which may still contain `text`), matching the
    # original behavior.
    denominator = min(len(truth), len(search_res))
    if denominator == 0:
        return 0
    return len(truth & retrieved) / denominator


def metric(model_path, embedding_file, embedding_content_json_file, val_search_res_path,
           val_datas, topk=6, device=None, model_cls=SentenceTransformer, remove_top1=True):
    """Evaluate vector-retrieval quality, group by group.

    For every element of every group in ``val_datas``, run a top-k vector
    search and score how many results belong to the same group (see
    ``get_score``). Each query's results are appended to
    ``val_search_res_path`` as one JSON line ``[query, group, results]``.

    Args:
        model_path: path of the encoder model loaded by ``model_cls``.
        embedding_file: .npy file with precomputed corpus embeddings.
        embedding_content_json_file: JSON file with the corpus texts.
        val_search_res_path: output .jsonl path for per-query search results.
        val_datas: list of groups, each a list of related texts.
        topk: number of neighbours to retrieve per query.
        device: optional device spec forwarded to ``VectorSearch``.
        model_cls: encoder class; defaults to ``SentenceTransformer``.
        remove_top1: drop the first hit (usually the query itself).

    Returns:
        Tuple ``(score_sum, avg_score, scores)``; ``avg_score`` is 0.0
        when no query was scored.
    """
    vec_sea = VectorSearch(model_path, embedding_file, embedding_content_json_file, device, model_cls)
    scores = []

    # `with` guarantees the results file is closed even on errors
    # (the original leaked the handle).
    with open(val_search_res_path, 'w', encoding='utf-8') as writer:
        pbar = tqdm(val_datas)
        for val_data in pbar:
            # A single-element group has no positives besides itself.
            if len(val_data) == 1:
                continue

            for val in val_data:
                cur_sea = vec_sea.search(val, topk)
                cur_sea = [v[0] for v in cur_sea.values()]
                # The top-1 hit is usually the query itself; drop it.
                if remove_top1:
                    cur_sea = cur_sea[1:]
                scores.append(get_score(val, val_data, cur_sea))
                writer.write(json.dumps([val, val_data, cur_sea], ensure_ascii=False) + '\n')
            pbar.set_description(f'avg score: {sum(scores)/len(scores):.4f}')

    # Guard against division by zero when every group was skipped.
    avg_score = sum(scores) / len(scores) if scores else 0.0
    print(f'Metrics  avg score: {avg_score:.4f}')
    return sum(scores), avg_score, scores


if __name__ == '__main__':
    # Evaluation configuration: the fine-tuned encoder plus the
    # precomputed disease/symptom corpus embeddings.
    topk = 6
    model_path = '/home/centos/ll/models/SimCSE-Chinese-Pytorch/roberta-wwm-finetune'
    embedding_file = './jibing_zhengzhuang_set.npy'
    embedding_content_json_file = './jibing_zhengzhuang_set.json'
    val_data_path = './jibing_zhengzhuang.json'
    val_search_res_path = './val_search_res.jsonl'

    # Load the validation groups, run the group-wise retrieval metric,
    # then persist the per-query scores for later inspection.
    val_datas = readjson(val_data_path)
    score_sum, avg_score, scores = metric(
        model_path,
        embedding_file,
        embedding_content_json_file,
        val_search_res_path,
        val_datas,
        topk,
    )
    writejson(scores, './val_res.json')