import httpx
import json


from datetime import datetime
import traceback
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_deepseek import ChatDeepSeek

from src.decorator import langgraph_decorator
from src.logger import llm_logger
from src.config import its_api_base_url, cyber_link_uuid

from src.choice_api.state import ChoiceAPiState, ColaTimeData, InitRoute
from src.choice_api.prompts import get_time_extraction, get_describe, get_route_prompt
from src.choice_api.utils import parse_cola_time, expand_dict, decimal_to_chinese, percent_to_chinese

from src.choice_api.api.api_PHK import parse_PHK
from src.choice_api.api.api_SYLS import parse_SYLS
from src.choice_api.api.api_internal_online_data import parse_internal_online_data
from src.choice_api.api.api_lab_internal_data import parse_api_lab_data
from src.choice_api.api.api_device_check import parse_device_check
from src.choice_api.api.api_device_check_absence import parse_device_check_absence
from src.choice_api.api.api_device import parse_device
from src.choice_api.api.api_attendance import parse_attendance
from src.choice_api.api.api_electric_consume_record import parse_electric_consume_record
from src.choice_api.api.api_steam_dosage_record import parse_steam_dosage_record
from src.choice_api.api.api_composite_consume_record import parse_composite_consume_record
from src.choice_api.api.api_drug_inventory import parse_drug_inventory
from src.choice_api.api.api_sludge_transport_record import parse_sludge_transport_record

from src.chat_utils import create_chat, create_chat_with_options
from src.choice_api.prompts import extrac_prompt, standardize_prompt, rewrite_prompt
from src.choice_api.state import ExtracDate, StandardizeDate, RewriteText

# Prompt template: extract raw date/time expressions from the user's question.
extrac_prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", extrac_prompt),
        ("human", "{text}"),
    ]
)

# Prompt template: normalize a single extracted date expression to a start/end range.
standardize_prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", standardize_prompt),
        ("human", "now is {now}， What was the start and end time of {date_str}"),
    ]
)

# Prompt template: normalize a from/to pair of date expressions to a start/end range.
standardize_list_prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", standardize_prompt),
        ("human", "now is {now}， What was the start and end time of {date_str_form} to {date_str_to}"),
    ]
)

# Prompt template: rewrite the question with the standardized dates inlined.
rewrite_prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", rewrite_prompt),
        ("human", "{text} {standardize_date}"),
    ]
)


# 问题重写
# def rewrite_question(msg):
#     llm = create_chat(ollama_model="qwen2.5:14b", temperature=0)
#
#     structured_llm = llm.with_structured_output(schema=ExtracDate)
#     standardize_llm = llm.with_structured_output(schema=StandardizeDate)
#     rewrite_llm = llm.with_structured_output(schema=RewriteText)
#     if isinstance(msg, (dict, HumanMessage)):
#         question = msg["api_messages"][0].content
#     else:
#         question = msg
#     # 提取时间实体
#     prompt = extrac_prompt_template.invoke({"text": question})
#     extrac_date = structured_llm.invoke(prompt)
#     # 时间标准化
#     now = datetime.now()
#     if not extrac_date.extrac_date_str:
#         if isinstance(msg, (dict, HumanMessage)):
#             msg["api_messages"][0].content = datetime.now().strftime(
#                 "%Y-%m-%d") + " 00:00:00到" + datetime.now().strftime("%Y-%m-%d") + question
#             return {"rewrite": datetime.now().strftime("%Y-%m-%d") + " 00:00:00到" + datetime.now().strftime(
#                 "%Y-%m-%d") + question}
#     if len(extrac_date.extrac_date_str) < 2:
#         standardize_prompt = (standardize_prompt_template
#         .invoke({
#             "now": now.strftime(extrac_date.format),
#             "date_str": extrac_date.extrac_date_str[0]}))
#         standardize_date = standardize_llm.invoke(standardize_prompt)
#         question_rewrite = question.replace(extrac_date.extrac_date_str[0],
#                                             f"{standardize_date.start_date}到{standardize_date.end_date}")
#     elif len(extrac_date.extrac_date_str) == 2:
#         standardize_prompt = (standardize_list_prompt_template
#         .invoke({
#             "now": now.strftime(extrac_date.format),
#             "date_str_form": extrac_date.extrac_date_str[0],
#             "date_str_to": extrac_date.extrac_date_str[1]
#         }))
#         standardize_date = standardize_llm.invoke(standardize_prompt)
#         question_rewrite = (question.replace(extrac_date.extrac_date_str[0],
#                                              f"{standardize_date.start_date}")
#                             .replace(extrac_date.extrac_date_str[1], f"{standardize_date.end_date}"))
#
#     else:
#         if isinstance(msg, (dict, HumanMessage)):
#             msg["api_messages"][0].content = question
#             return {"rewrite": question}
#         return {"question": question}
#     if not all(question_rewrite.find(element) != -1 for element
#                in [standardize_date.start_date, standardize_date.end_date]):
#         question_rewrite_prompt = (
#             rewrite_prompt_template.invoke({"text": question, "standardize_date": standardize_date}))
#         question_rewrite = rewrite_llm.invoke(question_rewrite_prompt).rewrite_text
#     if isinstance(msg, (dict, HumanMessage)):
#         msg["api_messages"][0].content = question_rewrite
#         return {"rewrite": question_rewrite}
#     return {"question": question_rewrite}
#

@langgraph_decorator
def rewrite_time(question):
    """Rewrite *question* so every fuzzy time expression becomes an explicit
    【start~end】 datetime range.

    Uses a structured-output LLM call to extract time expressions, then
    ``parse_cola_time`` to resolve them into concrete datetime pairs. When no
    time expression is found (the model returns "NONETIME"), the current
    day's full range is prepended to the question instead.

    Args:
        question: raw user question text.

    Returns:
        The rewritten question string with normalized time ranges.
    """
    llm = create_chat_with_options(
        ollama_model="qwen2.5:7b",
        options={
            "num_ctx": 30009,
            "temperature": 0
        }
    )

    llm_logger.info(f"rewrite_time: begin: {question}")

    time_extraction_prompt = get_time_extraction(question)

    # Extract the raw time expressions found in the question.
    cola_time = llm.with_structured_output(ColaTimeData).invoke(time_extraction_prompt)

    llm_logger.info(f"rewrite_time: cola_time: {cola_time}")

    question_cp = question

    # No usable time expression: default to querying the current day.
    # (The original condition tested `not cola_time.time` twice — deduplicated.)
    if not cola_time.time or cola_time.time == "NONETIME" or "NONETIME" in cola_time.time:
        day_str = datetime.now().strftime("%Y-%m-%d")
        question_cp = f"【{day_str} 00:00:00~{day_str} 23:59:59】，{question_cp}"
        llm_logger.info(f"rewrite_time: end: 【NONETIME】 {question_cp}")
        return question_cp

    old_time_lst, new_time_lst = parse_cola_time(cola_time.time)

    llm_logger.info(f"rewrite_time: replace: \n【old_time_lst】 {old_time_lst}\n【new_time_lst】 {new_time_lst}")

    # Replace each raw expression with its normalized 【start~end】 range.
    for old_time_str, (start_time, end_time) in zip(old_time_lst, new_time_lst):
        start_time_str = start_time.strftime("%Y-%m-%d %H:%M:%S")
        end_time_str = end_time.strftime("%Y-%m-%d %H:%M:%S")
        question_cp = question_cp.replace(old_time_str, f"【{start_time_str}~{end_time_str}】")

    llm_logger.info(f"rewrite_time: end: {question_cp}")
    return question_cp

@langgraph_decorator
def api_request(state: ChoiceAPiState):
    """POST every configured backend API with its expanded parameter sets.

    Reads ``urls`` and ``datas`` from the state; each data dict is fanned out
    into concrete parameter combinations via ``expand_dict`` and posted one
    by one.

    Returns:
        {"results": [[{"code": int, "message": ...}, ...], ...]} — one inner
        list per url, one entry per parameter combination; {} when there is
        nothing to request.
    """
    llm_logger.info("api_request: begin")
    urls = state.get("urls", None)
    datas = state.get("datas", None)
    res = []

    headers = {
        "cyber-link-uuid": cyber_link_uuid,
        "X-Test-Auth": "true"
    }

    # Guard BOTH lists: zip(urls, None) would raise TypeError when urls is
    # non-empty but datas is missing.
    if not urls or not datas:
        return {}

    for url, data in zip(urls, datas):
        res_single = []
        array_params = expand_dict(data)
        for param in array_params:
            try:
                llm_logger.info(f"api_request: {url}: {param}")
                answer = httpx.post(f"{its_api_base_url}{url}", headers=headers, json=param, timeout=120.0)
            except Exception as e:
                stack_trace = traceback.format_exc()
                llm_logger.error(f"api_request: error: 网络连接失败: {str(e)}\n{stack_trace}")
                # Use the same "message" key as every other branch so the
                # downstream consumer (item["message"] in api_parse) does not
                # KeyError on network failures. The original used "data" here.
                res_single.append({"code": 500, "message": "网络连接失败"})
                continue

            if answer.status_code != 200:
                llm_logger.error(f"api_request: request error {answer.status_code}: {answer.text}")
                res_single.append({"code": answer.status_code, "message": answer.text})
            else:
                llm_logger.info(f"api_request: request success {answer.status_code}: {answer.content}")
                if answer.content:
                    # Backend envelope: the payload lives under "data".
                    res_single.append({"code": answer.status_code, "message": json.loads(answer.content)["data"]})
                else:
                    res_single.append({"code": answer.status_code, "message": answer.content})
        res.append(res_single)

    llm_logger.info("api_request: end")
    return {"results": res}

@langgraph_decorator
def api_parse(state: ChoiceAPiState):
    """Parse every raw API response in the state into the unified result shape.

    Each url is dispatched to its dedicated parser via a lookup table; the
    per-api results are then merged into combined tips, tables and per-column
    data series.

    Returns:
        {"clear_results": {"res_total_data", "res_total_tip", "res_total_table"}}
    """
    llm_logger.info("api_parse: begin")

    def simple_parse(data, target, parse_fun, save_lst):
        # Run parse_fun over each successful response item; keep failed
        # requests as {"error": ...} entries so they survive aggregation.
        llm_logger.info(f"api_parse: {parse_fun} {target}")
        for item in data:
            if item["code"] != 200:
                save_lst.append({"error": item["message"]})
                continue
            save_lst.append(parse_fun(item["message"], target))

    # url -> parser dispatch table (replaces a 13-branch if/elif chain).
    parser_by_url = {
        "/effluent/pretty-list1": parse_SYLS,                             # upstream inflow 上游来水
        "/effluent/pretty-list3": parse_PHK,                              # downstream outfall 下游排海
        "/internal-online-data/list": parse_internal_online_data,         # plant online data 厂区在线数据
        "/lab/internal/list": parse_api_lab_data,                         # lab test data 实验室检测数据
        "/check-order/device-check/list": parse_device_check,             # inspection check-ins / anomalies 巡检打卡、巡检异常
        "/device-check-absence/list": parse_device_check_absence,         # missed inspections 巡检缺卡
        "/electric-consume-record/list": parse_electric_consume_record,   # electricity usage 用电量
        "/composite-consume-record/list": parse_composite_consume_record, # water usage 用水量
        "/steam_dosage-record/list": parse_steam_dosage_record,           # steam usage 用蒸汽量
        "/drug-inventory/list": parse_drug_inventory,                     # chemical management 药剂管理
        "/sludge-transport-record/list": parse_sludge_transport_record,   # sludge transport 污泥外运
        "/attendance/list": parse_attendance,                             # attendance 考勤
        "/device/list": parse_device,                                     # devices 设备
    }

    res_parse_lst = []
    # Parsed items may contain: res_tip / res_table / res_data / describe / error
    origin_results = state.get("results", [])
    urls = state.get("urls", [])
    targets = state.get("targets", [])

    for url, res, target in zip(urls, origin_results, targets):
        parse_fun = parser_by_url.get(url)
        if parse_fun is None:
            # Unknown url: keep positional alignment with an empty result.
            res_parse_lst.append({})
        else:
            simple_parse(res, target, parse_fun, res_parse_lst)

    # Merge all parsed results into the combined output structure.
    res_total_data = {}
    res_total_tip = []
    res_total_table = []
    for item in res_parse_lst:
        if "res_tip" in item:
            res_total_tip.extend(item["res_tip"])
        if item.get("res_table"):
            res_total_table.append({
                "title": item["res_table_title"],
                "res_table_columns": item["res_table_columns"],
                "res_table": item["res_table"],
            })
        if item.get("res_data"):
            for column in item["res_columns"]:
                if column in res_total_data:
                    res_total_data[column].extend(item["res_data"][column])
                else:
                    res_total_data[column] = item["res_data"][column]

    # Drop columns whose value series ended up empty.
    res_total_data_real = {k: v for k, v in res_total_data.items() if v}

    clear_results = {
        "res_total_data": res_total_data_real,
        "res_total_tip": res_total_tip,
        "res_total_table": res_total_table,
    }

    llm_logger.info(f"api_parse: end: {clear_results}")

    return {"clear_results": clear_results}

@langgraph_decorator
def data_describe(state: ChoiceAPiState):
    """Generate a natural-language description of the parsed API results.

    Produces two variants:
      * ``origin_descriptions`` — markdown markers stripped, units kept in
        symbol form (mg/L, us/cm, ...), for display;
      * ``descriptions`` — numbers, percents and units spelled out in
        Chinese, suitable for speech output.
    """
    llm_logger.info("data_describe: begin")

    llm = create_chat_with_options(
        ollama_model="qwen2.5:7b",
        options={
            "num_ctx": 30009,
            "temperature": 0
        }
    )

    describe_prompt = get_describe(state["rewrite_q"], state["clear_results"])

    describe = llm.invoke(describe_prompt)

    descriptions = describe.content
    # "无量纲" (dimensionless) adds no information for the reader.
    descriptions = descriptions.replace("(无量纲)","").replace("无量纲","")
    # Strip markdown heading/emphasis characters in one C-level pass.
    translator = str.maketrans('', '', "#*")
    origin_descriptions = descriptions.translate(translator)

    # Display variant: normalize Chinese unit spellings to symbol form.
    origin_multi_string_replace = {
        "毫克/升": "mg/L",
        "微西门子/厘米": "us/cm",
        "微克/升": "μg/L",
        "立方米/小时": "m³/H",
        "升/秒": "L/s",
    }
    for key, val in origin_multi_string_replace.items():
        origin_descriptions = origin_descriptions.replace(key, val)

    # Spoken variant: spell out indicator abbreviations and units in Chinese.
    multi_string_replace = {
        "FLOW": "瞬时流量",
        "pH": "pH",
        "COD": "需氧量",
        "SS": "悬浮物",
        "NH3-N": "氨氮",
        "TP": "总磷",
        "TN": "总氮",
        "TOC": "总碳",
        "COD(+AgNO3)": "需氧量(加硝酸银)",
        "TSS": "悬浮固体物质总量",
        "ALK": "厌氧处理系统内碱度",
        "VFA": "挥发性有机酸",
        "TDS": "总溶解性固体物质",
        "Cond": "电导率",
        "mg/L": "毫克每升",
        "毫克/升": "毫克每升",
        "us/cm": "微西门子每厘米",
        "微西门子/厘米": "微西门子每厘米",
        "MPN/L": "单位微生物每升",
        "mEq/L": "毫克当量每升",
        "%": "百分比",
        "μg/L": "微克每升",
        "微克/升": "微克每升",
        "L/s": "升每秒",
        "升/秒": "升每秒",
        "KG": "千克",
    }

    descriptions = decimal_to_chinese(descriptions)
    descriptions = percent_to_chinese(descriptions)
    descriptions = descriptions.translate(translator)
    # Apply longer keys first: in insertion order "SS" and "COD" were replaced
    # before "TSS" and "COD(+AgNO3)", making those entries unreachable.
    for key in sorted(multi_string_replace, key=len, reverse=True):
        descriptions = descriptions.replace(key, multi_string_replace[key])

    clear_results = {
        "origin_descriptions": origin_descriptions,
        "descriptions": descriptions,
        "res_total_data": state["clear_results"]["res_total_data"],
        "res_total_tip": state["clear_results"]["res_total_tip"],
        "res_total_table": state["clear_results"]["res_total_table"],
    }

    # Log a plain string (the original wrapped the f-string in a set literal)
    # and say "end" — the original logged "begin" twice.
    llm_logger.info(f"data_describe: end: {clear_results}")

    return {"clear_results": clear_results}


@langgraph_decorator
def router(state: ChoiceAPiState):
    """Route the latest user message either to the API pipeline or to plain chat.

    A structured-output LLM call classifies the question; "bingo" means the
    question can be answered from the backend APIs.

    Returns:
        "start_entry" when the API pipeline should handle the question,
        otherwise "common_chat".
    """
    question = state["api_messages"][-1].content.replace(" ", "")

    # Log plain strings — the original wrapped the f-strings in set literals,
    # producing "{'router: ...'}" in the log output.
    llm_logger.info(f"router: begin: {question}")

    llm = create_chat_with_options(
        ollama_model="qwen2.5:7b",
        options={
            "num_ctx": 10009,
            "temperature": 0
        }
    )
    route_prompt = get_route_prompt(question)

    answer = llm.with_structured_output(InitRoute).invoke(route_prompt)

    llm_logger.info(f"router: end: {answer.next}")

    return "start_entry" if answer.next == "bingo" else "common_chat"


@langgraph_decorator
def common_chat(state: ChoiceAPiState):
    """Answer a general (non-API) question with the chat model.

    Returns the same ``clear_results`` shape as the API pipeline, with empty
    data/tip/table fields and the two description variants filled in.
    """
    question = state["api_messages"][-1].content.replace(" ", "")
    # Log plain strings — the original wrapped the f-strings in set literals.
    llm_logger.info(f"common_chat: begin: {question}")

    llm = create_chat_with_options(
        ollama_model="qwen2.5:7b",
        options={
            "num_ctx": 10009,
            "temperature": 0
        }
    )
    answer = llm.invoke([HumanMessage(content=question)])

    descriptions = answer.content
    # Strip markdown heading/emphasis characters in one C-level pass.
    translator = str.maketrans('', '', "#*")
    origin_descriptions = descriptions.translate(translator)

    # Spoken-form replacements for indicator abbreviations.
    # NOTE(review): this table uses "NH3N" while data_describe uses "NH3-N" —
    # confirm which spelling the model actually emits.
    multi_string_replace = {
        "FLOW": "瞬时流量",
        "pH": "pH",
        "COD": "需氧量",
        "SS": "悬浮物",
        "NH3N": "氨氮",
        "TP": "总磷",
        "TN": "总氮",
        "TOC": "总碳",
        "COD(+AgNO3)": "需氧量(加硝酸银)",
        "TSS": "悬浮固体物质总量",
        "ALK": "厌氧处理系统内碱度",
        "VFA": "挥发性有机酸",
        "TDS": "总溶解性固体物质",
        "Cond": "电导率",
    }

    descriptions = decimal_to_chinese(descriptions)
    descriptions = percent_to_chinese(descriptions)
    descriptions = descriptions.translate(translator)
    # Apply longer keys first: in insertion order "SS" and "COD" were replaced
    # before "TSS" and "COD(+AgNO3)", making those entries unreachable.
    for key in sorted(multi_string_replace, key=len, reverse=True):
        descriptions = descriptions.replace(key, multi_string_replace[key])

    clear_results = {
        'res_total_data': {},
        'res_total_tip': [],
        'res_total_table': [],
        "origin_descriptions": origin_descriptions,
        "descriptions": descriptions
    }

    llm_logger.info(f"common_chat: end: {clear_results}")

    return {"clear_results": clear_results}



if __name__ == '__main__':
    # Manual smoke test for the time-rewrite step.
    demo_question = "查询过去3周模拟量实际值_FQI_T20203_V的数据进行对比"
    rewrite_time(demo_question)
