
"""
    输入的标签文本向量化，存入一个表格中，表格两列，第一列是标签名，第二列是标签的向量
"""


# ----------------------- A built-in module in Python. -------------------------
import argparse
import json
import multiprocessing
import os
import random
import time
from multiprocessing import Pool

# ---------------------- A third-party module in Python. -----------------------
import pandas as pd

# ---------------------- Modules related to large models. ----------------------
from zhipuai import ZhipuAI



######################## Text vectorization. ###################################
def embedding_3(text_list: list) -> dict:
    """
        Call the online API interface of embedding-3
        to generate a list of text vectors.

        Args:
            text_list: Texts to vectorize in one API call (callers pass
                chunks of at most 50 items).

        Returns:
            The API response converted to a dict; the vectors are under
            the "data" key as entries with "index" and "embedding" fields.
    """
    # SECURITY NOTE(review): the API key was hard-coded in source. Prefer
    # the ZHIPUAI_API_KEY environment variable; the original key is kept
    # only as a backward-compatible fallback and should be rotated.
    client = ZhipuAI(
        api_key=os.environ.get(
            "ZHIPUAI_API_KEY",
            "e7794ecfd23516cd297dc83e7f57a0e9.1ta3BPaDit6LuXJj",
        )
    )
    response = client.embeddings.create(
        model="embedding-3",
        input=text_list,
    )
    # Short random tag so the interleaved log lines of concurrent worker
    # processes can be told apart.
    tag = "-".join(str(random.randint(0, 9)) for _ in range(4))
    print("---------- Performing text vectorization " + tag + " \t", len(text_list))

    return dict(response)


def embedding_to_list(feature_list, embedding_response):
    """Convert an embedding-3 API response into an ordered list of vectors.

    Each entry in the response's "data" carries its original position
    under "index", so the returned list is aligned with `feature_list`.
    Positions the API did not fill remain empty strings.
    """
    vectors = [""] * len(feature_list)
    for item in dict(embedding_response)["data"]:
        entry = dict(item)
        vectors[entry["index"]] = entry["embedding"]

    return vectors


def multi_embedding_to_list(chunk):
    """ Multi-process text vectorization execution function.

    Retries the API call until it succeeds (the original intent), but:
    - catches only Exception, so KeyboardInterrupt/SystemExit can still
      stop a worker process (the original bare `except:` made Ctrl-C
      impossible to honor);
    - logs each failure instead of swallowing it silently;
    - sleeps briefly between attempts so a persistent failure does not
      spin the CPU at 100%.
    """
    while True:
        try:
            response_dict = embedding_3(chunk)
            return embedding_to_list(chunk, response_dict)
        except Exception as exc:  # deliberate best-effort retry loop
            print("---------- embedding failed, retrying:", exc)
            time.sleep(1)


def process_and_embed(text_list: list[str]) -> list[list[float]]:
    """
        Process the text list,
        use multiple processes to call embedding_3 to generate vectors,
        and merge the results in input order.

        Args:
            text_list: All texts to vectorize.

        Returns:
            One vector per input text, concatenated across chunks.
    """
    # One task vectorizes at most 50 words per API call.
    chunk_size = 50
    chunks = [text_list[i:i + chunk_size]
              for i in range(0, len(text_list), chunk_size)]

    # Leave a few cores free for the rest of the system, but never ask
    # Pool for fewer than one worker: the original `cpu_count() - 4`
    # raises ValueError on machines with 4 or fewer cores.
    workers = max(1, multiprocessing.cpu_count() - 4)
    with Pool(workers) as pool:
        # map (not starmap) — each task takes a single chunk argument.
        results = pool.map(multi_embedding_to_list, chunks)

    all_vectors = []
    for vectors in results:
        try:
            all_vectors.extend(vectors)
        except Exception as e:
            # A worker returned something non-iterable; log and keep going.
            print("\n\n\n-------------------- error ---------------------")
            print(e)
            print(vectors)
            print("--------------------  error ---------------------\n\n\n")
    print("\n\n\n")

    return all_vectors



def text_vector_embedding3(label_input, output):
    """ Main pipeline: read labels, vectorize them, save a CSV.

    Args:
        label_input: Path to a UTF-8 text file with one label per line.
        output: Path of the CSV to write, with columns "feature" and
            "embedding".
    """
    # Read the file and generate a list of keywords, skipping blank
    # lines (the original sent empty strings to the embedding API).
    with open(label_input, encoding="utf-8") as file:
        feature_list = [line for line in file.read().strip().split("\n")
                        if line.strip()]

    # Remove duplicates. Set order is arbitrary, but features and their
    # vectors are zipped back together below, so alignment is preserved.
    feature_list = list(set(feature_list))

    print("\nIn the file, the number of words is ", len(feature_list), "\n")
    # Convert the list of words into text vectors.
    feature_embedding_list = process_and_embed(feature_list)
    # Put the text vectorization results into a table.
    data = [list(pair) for pair in zip(feature_list, feature_embedding_list)]
    feature_df = pd.DataFrame(data, columns=["feature", "embedding"])
    # index=False is the documented way to omit the row index column.
    feature_df.to_csv(output, index=False)


def parse_args():
    """Parse the command-line options and return the populated namespace.

    Both options accept a single-dash and a double-dash spelling:
    -label_input/--label_input and -output/--output.
    """
    arg_parser = argparse.ArgumentParser()
    for flag in ("label_input", "output"):
        arg_parser.add_argument(f"-{flag}", f"--{flag}")

    return arg_parser.parse_args()


if __name__ == "__main__":
    params = parse_args()

    label_input = params.label_input
    output = params.output

    text_vector_embedding3(label_input, output)

