import json


def graphPrompt(client, input: str, metadata=None, model="ep-20250227161813-t6vpw"):
    """Extract an ontology (term pairs + relations) from a text chunk via an LLM.

    Sends ``input`` to the chat-completions endpoint with a system prompt that
    instructs the model to emit a JSON list of ``{chunk_id, node_1, node_2, edge}``
    objects, then parses that list out of the response text.

    Args:
        client: An OpenAI-style client exposing ``chat.completions.create``.
        input: The context chunk to extract terms/relations from.
        metadata: Optional dict; only ``metadata['chunk_id']`` is read and
            injected into the prompt so the model echoes it back per record.
        model: Model/endpoint identifier; ``None`` falls back to the default.

    Returns:
        A list of dicts parsed from the model's JSON output, or ``None`` if
        the response could not be parsed as JSON.
    """
    # Use None as the default to avoid the shared-mutable-default pitfall.
    if metadata is None:
        metadata = {}
    # Allow callers to pass model=None explicitly and still get the default.
    if model is None:
        model = "ep-20250227161813-t6vpw"

    # chunk_id is echoed into the prompt so each extracted record carries it.
    chunk_id = metadata.get('chunk_id', None)

    # System prompt instructing the LLM how to extract terms and relations.
    SYS_PROMPT = (
        "You are a network graph maker who extracts terms and their relations from a given context. "
        "You are provided with a context chunk (delimited by ```) Your task is to extract the ontology "
        "of terms mentioned in the given context. These terms should represent the key concepts as per the context. \n"
        "Thought 1: While traversing through each sentence, Think about the key terms mentioned in it.\n"
        "\tTerms may include person (agent), location, organization, date, duration, \n"
        "\tcondition, concept, object, entity  etc.\n"
        "\tTerms should be as atomistic as possible\n\n"
        "Thought 2: Think about how these terms can have one on one relation with other terms.\n"
        "\tTerms that are mentioned in the same sentence or the same paragraph are typically related to each other.\n"
        "\tTerms can be related to many other terms\n\n"
        "Thought 3: Find out the relation between each such related pair of terms. \n\n"
        "Format your output as a list of json. Each element of the list contains a pair of terms"
        "and the relation between them like the follwing. NEVER change the value of the chunk_ID as defined in this prompt: \n"
        "中文输出要求：请用中文进行输出，所有的术语、关系描述等内容都必须为中文。\n"
        "[\n"
        "   {\n"
        '       "chunk_id": "CHUNK_ID_GOES_HERE",\n'
        '       "node_1": "从提取的本体中得到的一个概念",\n'
        '       "node_2": "从提取的本体中得到的一个相关概念",\n'
        '       "edge": "节点1和节点2这两个概念之间的关系，用一到两句话描述"\n'
        "   }, {...}\n"
        "]"
    )
    # Substitute the placeholder with the actual chunk_id for this call.
    SYS_PROMPT = SYS_PROMPT.replace('CHUNK_ID_GOES_HERE', str(chunk_id))

    # User prompt wrapping the raw input text.
    USER_PROMPT = f"context: ```{input}``` \n\n output: "

    # Bug fix: honor the `model` argument instead of a hard-coded endpoint id.
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": SYS_PROMPT},
            {"role": "user", "content": USER_PROMPT}
        ]
    )

    # Text body of the first completion choice.
    aux1 = response.choices[0].message.content
    # Locate the start of the JSON array in the (possibly chatty) response.
    # If '[' is absent, find() returns -1 and parsing below fails -> None.
    start_index = aux1.find('[')

    # Slice from the opening bracket to the end of the response.
    json_string = aux1[start_index:]

    # Undo escaping some models apply to underscores (e.g. "chunk\_id").
    json_string = json_string.replace('\\\\_', '_')
    json_string = json_string.replace('\\_', '_')

    # Strip leading whitespace before parsing.
    json_string = json_string.lstrip()

    # Log the candidate JSON for debugging.
    print("json-string:\n" + json_string)

    try:
        # Parse the JSON and normalize each element to a plain dict.
        result = [dict(item) for item in json.loads(json_string)]
    except Exception:
        # On any parse failure, log the raw response and signal failure
        # with None rather than raising (callers treat this as best-effort).
        print("\n\nERROR ### Here is the buggy response: ", response, "\n\n")
        result = None

    # Visual separator between chunks in the log output.
    print("§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§")

    return result
