# ----------------------- A built-in module in Python. -------------------------
import os
import json
import time
import copy
import argparse
import multiprocessing
from multiprocessing import Pool
from joblib import Parallel, delayed

# ---------------------- A third-party module in Python. -----------------------
# from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
import json
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import cosine_similarity

# ---------------------- Modules related to large models. ----------------------
from zhipuai import ZhipuAI

import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['LOKY_MAX_CPU_COUNT'] = '1'  # set to the number of CPU cores you want joblib/loky to use



def compute_cosine_similarity_chunk(vectors, start, end):
    """Compute cosine similarity between rows [start, end) and all rows.

    Returns an (end - start) x len(vectors) similarity matrix.
    """
    return cosine_similarity(vectors[start:end], vectors)


def text_groups(df, groups_num, n_jobs=None):
    """Cluster text embeddings into ``groups_num`` groups with MiniBatchKMeans.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain an ``embedding`` column holding JSON-encoded vectors
        (one JSON list per row). The column is parsed in place and then
        dropped from the returned frame.
    groups_num : int
        Number of clusters to form.
    n_jobs : int, optional
        Worker count for the chunked cosine-similarity computation.
        Defaults to 1, matching the previous (hard-coded) behavior.
        BUG FIX: the old code unconditionally reset ``n_jobs = 1``,
        making this parameter dead; it is honored now.

    Returns
    -------
    pandas.DataFrame
        ``df`` without the ``embedding`` column, plus a ``cluster_label``
        column with the assigned cluster id of each row.
    """
    # Decode the JSON-encoded embedding strings into Python lists.
    df["embedding"] = df["embedding"].apply(json.loads)

    # Stack the embeddings into a 2-D NumPy array (one row per text).
    vectors = np.array(df["embedding"].tolist())
    df_dropped = df.drop(columns=["embedding"])

    # Chunked cosine-similarity computation, optionally in parallel.
    n = len(vectors)
    if n_jobs is None:
        n_jobs = 1
    n_jobs = max(1, min(n_jobs, n))  # at least 1 chunk, at most 1 row/chunk
    # BUG FIX: use ceil division and iterate over chunk starts so the
    # chunks cover ALL rows. The old floor division silently dropped the
    # last n % n_jobs rows of the similarity matrix.
    chunk_size = max(1, -(-n // n_jobs))
    results = Parallel(n_jobs=n_jobs)(
        delayed(compute_cosine_similarity_chunk)(
            vectors, start, min(start + chunk_size, n)
        )
        for start in range(0, n, chunk_size)
    )

    # Reassemble the per-chunk rows into the full n x n similarity matrix.
    # NOTE(review): this matrix is currently unused by the clustering
    # below — kept for parity with the original pipeline; consider
    # removing the computation if no caller depends on it.
    cos_sim_matrix = np.vstack(results)

    # Cluster the raw embedding vectors (fixed seed for reproducibility).
    kmeans = MiniBatchKMeans(n_clusters=groups_num, random_state=42)
    labels = kmeans.fit_predict(vectors)

    # Attach the cluster ids to the original rows (embeddings dropped).
    df_dropped['cluster_label'] = labels

    return df_dropped


def label_concept_tree_cluster(label_df,  concept_dict, output_file):
    """For every label, cluster it together with the concept-tree concepts
    and record which concepts land in the same cluster.

    Parameters
    ----------
    label_df : pandas.DataFrame
        Label rows; the first column holds the label text (the column is
        expected to be named ``feature``), the second its embedding.
    concept_dict : dict
        Two-level mapping whose leaf dicts hold ``concept`` and
        ``ebedding`` entries (note: the key is spelled without the "m"
        in the source data).
    output_file : str
        Path of the JSON file the label -> same-cluster-concepts mapping
        is written to.
    """
    columns = label_df.columns.tolist()

    # Flatten the two-level concept tree into [concept, embedding] rows.
    concept_rows = [
        [leaf["concept"], leaf["ebedding"]]
        for subtree in concept_dict.values()
        for leaf in subtree.values()
    ]
    concept_df = pd.DataFrame(concept_rows, columns=columns)

    result = {}
    for idx, record in enumerate(label_df.values.tolist()):
        label = record[0]
        # Fresh copy so each label is clustered independently of the rest.
        candidates = copy.deepcopy(concept_df)
        candidates.loc[len(candidates)] = record
        clustered = text_groups(candidates, 20)
        for _, members in clustered.groupby("cluster_label"):
            # Keep the features of the cluster that contains this label.
            if (members['feature'] == label).any():
                result[label] = members["feature"].tolist()
        print(f"*** {idx} 已经完成!")

    with open(output_file, mode="w", encoding="utf-8") as file:
        json.dump(result, file, indent=4)


def parse_args():
    """Parse the command-line arguments of the clustering script."""
    parser = argparse.ArgumentParser()
    for flag, help_text in (
        ("label_df", "每个物种所有基因簇功能标签列表"),
        ("concept_json", "物种概念树字典"),
        ("output_file", "输出文件路径"),
    ):
        parser.add_argument(f"-{flag}", f"--{flag}", help=help_text)
    return parser.parse_args()


def _main():
    """Script entry point: load the inputs and run the clustering."""
    params = parse_args()

    # Read the label table (CSV) and the concept-tree dictionary (JSON).
    label_df = pd.read_csv(params.label_df, encoding="utf-8")
    with open(params.concept_json, encoding="utf-8") as file:
        concept_dict = json.load(file)

    label_concept_tree_cluster(label_df, concept_dict, params.output_file)


if __name__ == "__main__":
    _main()