import time
import os
import requests
from time import sleep
from pprint import pprint
import json
from os import getenv

from llama_index.core import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    Settings,
    get_response_synthesizer,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.embeddings import resolve_embed_model
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.core.data_structs.data_structs import IndexDict
from django.conf import settings
import erniebot


# Replace the following values with your own deployment's identifiers.
PERSONAL_ACCESS_TOKEN = getenv("COZE_TOKEN")  # Coze personal access token, read from the environment
BOT_ID = "7385757816613077026"  # Coze bot to converse with
USER_ID = "jsjsj"  # arbitrary end-user id sent to the Coze API


# Shared Ollama LLM + embedding model, registered globally on llama_index Settings.
# `llm` is also bound at module level and used directly by yes_or_no().
Settings.llm = llm = Ollama(model="llama2-chinese", request_timeout=60.0)
Settings.embed_model = OllamaEmbedding(model_name="nomic-embed-text")

# ERNIE Bot credentials (access key / secret key) from the environment.
erniebot.ak = os.getenv("ak")
erniebot.sk = os.getenv("sk")


def yes_or_no(requirement, x, positives=(), negatives=()):
    """Ask the LLM whether the patient's statement *x* satisfies *requirement*.

    Args:
        requirement: Chinese description of the intent to test for
            (e.g. "需要查询既往病史").
        x: the patient's statement to classify.
        positives: few-shot example statements that should yield "Yes".
        negatives: few-shot example statements that should yield "No".

    Returns:
        True only when the model's reply contains "Yes" and never "No";
        any ambiguous reply counts as False (conservative by design).
    """
    # Few-shot section: positive examples first, then negative ones,
    # padded with blank lines on both ends.
    cases = "\n".join(
        [""]
        + [
            f"正向示例{i}\n表述：{stat}\n输出：Yes\n"
            for i, stat in enumerate(positives, 1)
        ]
        + [
            f"反向示例{i}\n表述：{stat}\n输出：No\n"
            for i, stat in enumerate(negatives, 1)
        ]
        + [""]
    )
    prompt = (
        f"您是医疗领域的专业客户服务代表，能够准确评估患者的意图。\n"
        f"现在你需要确定患者是否{requirement}。如果是，则输出Yes，如果不是，则输出No。\n"
        # BUGFIX: this instruction previously demanded “是”或“否”, contradicting
        # both the Yes/No instruction above and the Yes/No parsing below.
        f"注意：直接输出“Yes”或“No”，不要输出任何其他结果！\n"
        f"注意：不要轻易输出Yes，只有当情况确实如此时，你才能输出Yes！\n"
        f"{cases}\n"
        f"患者表述：{x}\n"
        f"您的判断（“Yes”或“No”）："
    )
    print(prompt)
    response = llm.complete(prompt)
    print(response)
    # Require an explicit Yes with no No anywhere in the reply.
    return "Yes" in response.text and "No" not in response.text


def decide_in_outs(stat, mock):
    """Classify the patient's statement *stat* into input/output intents.

    Each ``in_*`` flag means the statement asks to READ a knowledge base;
    each ``out_*`` flag means the statement REPORTS new patient data.
    With ``mock`` set, skip the LLM and return a canned positive result
    after a short delay.
    """
    if mock:
        time.sleep(3)
        return {"in_physiologic_index": True}

    # (flag name, Chinese requirement, example statement that should match)
    checks = [
        ("in_physiologic_index", "需要查询生理体征记录"),  # eg. “帮我查询最近一周的体温记录”
        ("in_case_history", "需要查询既往病史"),  # eg. “我有哪些既往病史？”
        ("in_pharmacopoeia_western", "需要查询西药药典"),  # eg. “布洛芬缓释胶囊是什么？”
        ("in_pharmacopoeia_traditional_chinese", "需要查询中药药典"),  # eg. “连花清瘟胶囊是什么？”
        ("in_medical_handbook", "需要查询医疗手册"),  # eg. “腹痛怎么办？”
        ("out_symptom", "正在表述自己的症状"),  # eg. “我可能感冒了，有些头晕发烧”
        ("out_physiologic_index", "正在表述自己的生理指标"),  # eg. “我现在的血糖指数是8.9”
        ("out_medicine", "正在表述自己的用药情况"),  # eg. “我今天服用了一次抗生素类药物”
    ]
    in_outs = {
        flag: yes_or_no(requirement=requirement, x=stat)
        for flag, requirement in checks
    }

    print(in_outs)
    return in_outs


def get_index(name):
    """Return the persisted vector index called *name*, creating it on first use.

    The index is loaded from ``settings.LLAMAINDEX_PERSIST_ROOT + name``;
    if no persisted copy exists yet, an empty index is created with a
    human-readable summary and persisted immediately.

    Raises:
        ValueError: if *name* is not one of the known knowledge bases.
    """
    summaries = {
        "physiologic_index": "生理指标知识库，记录患者过去的生理指标。",
        "case_history": "既往病史知识库，记录患者过去的症状和医生的诊断。",
        "pharmacopoeia_western": "西医药典知识库，记录各种西药的说明。",
        "pharmacopoeia_traditional_chinese": "中医药典知识库，记录各种中药的说明。",
        "medical_handbook": "医疗手册知识库，记录各种疾病的症状和应对方法。",
    }
    if name not in summaries:
        raise ValueError(f"{name=} not in name2summaries")

    persist_dir = settings.LLAMAINDEX_PERSIST_ROOT + name + "/"
    try:
        # Happy path: the index was persisted on a previous run.
        context = StorageContext.from_defaults(persist_dir=persist_dir)
        return load_index_from_storage(context)
    except FileNotFoundError:
        # First run for this knowledge base: create and persist an empty index.
        fresh = VectorStoreIndex(index_struct=IndexDict(summary=summaries[name]))
        fresh.storage_context.persist(persist_dir=persist_dir)
        return fresh


def _get_retriever(indexs):
    """Build a query-fusion retriever that fans out over all given indexes."""
    sub_retrievers = [index.as_retriever() for index in indexs]
    return QueryFusionRetriever(
        sub_retrievers,
        similarity_top_k=2,
        num_queries=4,  # set this to 1 to disable query generation
        use_async=True,
        verbose=True,
    )


def retrieve(indexs, question):
    """Return nodes (with similarity scores) relevant to *question* across *indexs*."""
    return _get_retriever(indexs).retrieve(question)


def chat_ernie(question):
    """Send a single-turn *question* to ERNIE Bot and return its text answer.

    Returns an error string (not an exception) when the API rate limit
    is hit, so callers can surface it directly to the user.
    """
    print(f"{question=}")
    try:
        response = erniebot.ChatCompletion.create(
            model="ernie-bot-turbo",
            # BUGFIX: the ERNIE Bot SDK expects `messages` to be a LIST of
            # role/content dicts, not a bare dict.
            messages=[{"role": "user", "content": question}],
        )
    except erniebot.errors.RequestLimitError:
        return "[Error %s] Request limit reached." % time.strftime(
            "%Y-%m-%d %H:%M:%S", time.localtime()
        )
    print(f"{response=}")
    return response.result


def create_chat(content):
    """Start a Coze v3 chat with *content* as the user message.

    Returns:
        (chat_id, conversation_id): identifiers used to poll and fetch
        the chat via check_chat() / get_chat().
    """
    # Request body: one user message, non-streaming, history saved server-side.
    data = {
        "bot_id": BOT_ID,
        "user_id": USER_ID,
        "stream": False,
        "auto_save_history": True,
        "additional_messages": [
            {"role": "user", "content": content, "content_type": "text"}
        ],
    }

    headers = {
        "Authorization": f"Bearer {PERSONAL_ACCESS_TOKEN}",
        "Content-Type": "application/json",
    }

    # BUGFIX: requests has no default timeout — without one a stalled
    # connection hangs forever.
    response = requests.post(
        "https://api.coze.cn/v3/chat",
        headers=headers,
        json=data,
        timeout=30,
    )

    # Debug output of status and body.
    print(response.status_code)
    print(response.text)
    resp_j = response.json()
    pprint(resp_j)
    chat_id = resp_j["data"]["id"]
    conversation_id = resp_j["data"]["conversation_id"]
    return chat_id, conversation_id


def check_chat(chat_id, conversation_id):
    """Poll the Coze chat status; returns e.g. "in_progress" or "completed"."""
    headers = {
        "Authorization": f"Bearer {PERSONAL_ACCESS_TOKEN}",
        "Content-Type": "application/json",
    }

    # NOTE(review): Coze docs describe /v3/chat/retrieve as a GET endpoint —
    # confirm POST is accepted before changing.
    # BUGFIX: added a timeout (requests has none by default); query params are
    # passed via `params=` so they are properly URL-encoded.
    response = requests.post(
        "https://api.coze.cn/v3/chat/retrieve",
        headers=headers,
        params={"chat_id": chat_id, "conversation_id": conversation_id},
        timeout=30,
    )

    # Debug output of status and body.
    print(response.status_code)
    print(response.text)
    resp_j = response.json()
    pprint(resp_j)
    return resp_j["data"]["status"]


def get_chat(chat_id, conversation_id):
    """Fetch the finished Coze chat's messages and join all bot answers.

    Returns:
        All messages of type "answer" concatenated with blank lines.
    """
    headers = {
        "Authorization": f"Bearer {PERSONAL_ACCESS_TOKEN}",
        "Content-Type": "application/json",
    }

    # BUGFIX: added a timeout (requests has none by default); query params are
    # passed via `params=` so they are properly URL-encoded.
    response = requests.post(
        "https://api.coze.cn/v3/chat/message/list",
        headers=headers,
        params={"chat_id": chat_id, "conversation_id": conversation_id},
        timeout=30,
    )

    # Debug output of status and body.
    print(response.status_code)
    print(response.text)
    resp_j = response.json()
    pprint(resp_j)
    answers = [
        msg_obj["content"] for msg_obj in resp_j["data"] if msg_obj["type"] == "answer"
    ]
    return "\n\n".join(answers)


def chat(indexs, question, mock):
    """Send *question* to the Coze bot and poll until the chat settles.

    Polls every 5 s for up to 30 attempts (~150 s), then returns the joined
    bot answers regardless of final status. ``indexs`` and ``mock`` are
    accepted for interface compatibility but unused here.
    """
    chat_id, conversation_id = create_chat(question)
    attempts = 0
    while attempts < 30:
        sleep(5)
        if check_chat(chat_id, conversation_id) in ("completed", "failed", "requires_action"):
            break
        attempts += 1
    return get_chat(chat_id, conversation_id)


def chat_LI(indexs, question):
    """Answer *question* via a llama_index RAG query engine over *indexs*."""
    engine = RetrieverQueryEngine.from_args(_get_retriever(indexs))
    return engine.query(question)


def write_indexs(answer, indexs, mock=True):
    if mock:
        time.sleep(3)
        return
