# -*- coding: utf-8 -*-
# Author: Li Qingquan
# tf-idf匹配类案

import os
import json
import jieba
from tqdm import tqdm
from gensim import corpora, models, similarities
import numpy as np

in_path = '/home/ljw22/workspace/qwen/MUSER-main/cases_pool.json'
# NOTE(review): the original also defined out_path here, but it was never read
# before being rebound inside the output loop below, so it is dropped.

# Candidate case pool: docid -> case record (each with a 'content' section dict).
# Context managers close every input file (the originals leaked open handles).
with open(in_path, 'r', encoding='utf-8') as in_file:
    cases_pool = json.load(in_file)

# Pre-tokenized corpus: list of token lists, one per case, in index order.
corpus_path = '/home/ljw22/workspace/qwen/MUSER-main/data/cases/corpus.json'
with open(corpus_path, 'r', encoding='utf-8') as corpus_file:
    raw_corpus = json.load(corpus_file)

# Stopword set: one word per line; a set gives O(1) membership in the
# per-query filtering loop below (the original list was O(n) per token).
stopword_path = '/home/ljw22/workspace/qwen/MUSER-main/data/utils/stopword.txt'
with open(stopword_path, 'r', encoding='utf-8') as stopword_file:
    stopwords = {line.strip() for line in stopword_file}
# Extra punctuation tokens not covered by the stopword file.
stopwords.update({'.', '（', '）', '-', '', '【', '】'})

# Query ids: both splits are evaluated together.
train_test_path = '/home/ljw22/workspace/qwen/MUSER-main/data/cases/train_test.json'
with open(train_test_path, 'r', encoding='utf-8') as train_test_file:
    train_test = json.load(train_test_file)
test_querys = train_test['train'] + train_test['test']

# Candidate pool per query: qid -> list of candidate case ids (ints).
qc_pairs_path = '/home/ljw22/workspace/qwen/MUSER-main/data/cases/cands_by_query.json'
with open(qc_pairs_path, 'r', encoding='utf-8') as qc_pairs_file:
    qc_pairs = json.load(qc_pairs_file)


feature_path = "/home/ljw22/workspace/qwen/MUSER-main/data/features/72B_feature.json"

# Dense feature vectors per case: docid -> embedding (list of floats).
# presumably one vector per case in cases_pool — verify against the producer script.
with open(feature_path, encoding='utf-8') as fin:
    feature = json.load(fin)

docid_list = list(feature.keys())

# Map each docid to its row index in the feature matrix. Built exactly once
# (the original rebuilt this dict twice with identical results, and used the
# builtin-shadowing name `id` as the loop variable).
docid_id_dict = {docid: i for i, docid in enumerate(docid_list)}

# Stack features in docid_list order so row i of A corresponds to docid_list[i].
feature_matric = [feature[doc] for doc in docid_list]
A = np.array(feature_matric)

print("计算余弦相似度")
# Row-wise L2 norms of the feature matrix, kept as an (n, 1) column so the
# outer product below broadcasts to an (n, n) denominator.
row_norms = np.linalg.norm(A, axis=1, keepdims=True)

# Guard against division by zero for any all-zero feature row.
row_norms[row_norms == 0] = 1

# Pairwise cosine similarity: entry (i, j) = <a_i, a_j> / (|a_i| * |a_j|).
similarity_matrix = np.dot(A, A.T) / (row_norms * row_norms.T)


# NOTE(review): the original repeated the feature_matric and docid_id_dict
# construction here, byte-identical to the code above and producing exactly
# the same values; the redundant recomputation has been removed.

# Build the bag-of-words vocabulary and tf-idf index over the tokenized corpus.
dictionary = corpora.Dictionary(raw_corpus)
corpus = [dictionary.doc2bow(doc_tokens) for doc_tokens in raw_corpus]
tfidf = models.TfidfModel(corpus)
# One similarity feature per vocabulary entry.
num_features = len(dictionary.token2id)
index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=num_features)

tfidf_top100 = {}

# Grid-search the mixing weight between the lexical (tf-idf) score and the
# semantic (feature cosine) score; one prediction file is written per alpha.
alpha_list = np.arange(0, 1.1, 0.1)
for alpha in alpha_list:
    print(alpha)
    for qid in tqdm(test_querys):
        array_idx = docid_id_dict[qid]
        sim_scores = []
        # Build the query text from the fact-finding ('本院查明') and court-opinion
        # ('本院认为') sections. join() avoids the original quadratic `+=` build.
        # NOTE(review): assumes both sections exist for every query case — confirm.
        section_sents = []
        for part in ['本院查明', '本院认为']:
            section_sents.extend(cases_pool[qid]['content'][part])
        query_text = ''.join(section_sents)
        query_jieba = jieba.cut(query_text, cut_all=False)
        query_tmp = ' '.join(query_jieba).split()
        query_cutted = [w for w in query_tmp if w not in stopwords]
        query_vec = dictionary.doc2bow(query_cutted)
        sim = index[tfidf[query_vec]]
        # NOTE(review): assumes cases_pool iteration order matches the corpus /
        # similarity-index row order — confirm against corpus.json construction.
        for idx, score in zip(cases_pool.keys(), sim):
            cand_idx = docid_id_dict[idx]
            i = int(idx)
            # Skip the query itself and any candidate outside this query's pool.
            if qid == i or i not in qc_pairs[qid]:
                continue
            # Linear mix of tf-idf score and precomputed feature cosine similarity.
            mscore = alpha * score + (1 - alpha) * similarity_matrix[array_idx][cand_idx]
            sim_scores.append((idx, mscore))
        sim_scores.sort(key=lambda x: x[1], reverse=True)
        # Keep the ids of the 100 highest-scoring candidates.
        tfidf_top100[qid] = [idx for idx, _ in sim_scores[:100]]
        assert len(tfidf_top100[qid]) == 100

    out_folder = '/home/ljw22/workspace/qwen/MUSER-main/data/my_predictions/tfidf_pre_72B'
    os.makedirs(out_folder, exist_ok=True)
    out_path = os.path.join(out_folder, f'alpha-{alpha:.1f}.json')
    # Context manager closes the per-alpha prediction file (the original never
    # closed it explicitly until the next loop iteration's reassignment).
    with open(out_path, 'w') as out_file:
        json.dump(tfidf_top100, out_file)