import ast
import asyncio
import logging
import os
import sqlite3
import uuid
from logging.handlers import TimedRotatingFileHandler
from typing import Optional, Dict
from pydantic import BaseModel
from fastapi import APIRouter
from tqdm.asyncio import tqdm as tqdm_async
from pathlib import Path
from typing import List, Tuple
import pandas as pd
from openai import AsyncOpenAI
from collect_db_data import get_qd_name, get_batche_index
from config import (
    IO_FEW_SHOTS, STAGE1_PROMPT, I_FEW_SHOTS, STAGE2_1,
    OPT_FORMAT, STAGE1_PROMPT_WITH_BUJI, LLM_ANSWER_HEAD, PROJECT_ROOT, BCX_TEST
)

from milvus_op import MilvusOP

import re
import json


def extract_json_with_key(text, required_key="定额标准名称"):
    """Extract dict literals containing *required_key* from free-form LLM text.

    Scans *text* for brace-delimited candidates (non-greedy regex, so deeply
    nested braces are not fully supported) and parses each candidate with
    ``ast.literal_eval`` — tolerating Python-style dicts with single quotes
    that ``json.loads`` would reject.

    Args:
        text: raw LLM output to scan.
        required_key: key a parsed dict must contain to be kept.

    Returns:
        (valid_jsons, except_info): list of matching dicts, and the error
        messages of candidates that failed to parse.
    """
    json_candidates = re.findall(r'\{.*?\}', text, re.DOTALL)
    except_info = []
    valid_jsons = []
    for candidate in json_candidates:
        try:
            obj = ast.literal_eval(candidate)
        # BUG FIX: ast.literal_eval raises ValueError / SyntaxError, never
        # json.JSONDecodeError (which is a *subclass* of ValueError), so
        # malformed candidates previously escaped the handler and crashed
        # the caller instead of being recorded and skipped.
        except (ValueError, SyntaxError) as e:
            except_info.append(str(e))
            continue  # skip fragments that fail to parse
        if isinstance(obj, dict) and required_key in obj:
            valid_jsons.append(obj)
    return valid_jsons, except_info


# Router mounted by the application; hosts the POST "/" prediction endpoint.
router = APIRouter()

# Hybrid-search weighting passed to MilvusOP.hybrid_search:
# 100% sparse (keyword) retrieval, dense vectors disabled.
retrieval_config = {'sparse': 1.0, 'dense': 0.0}


def extract_fields(res) -> Tuple[str, List[str], List[str], List[str]]:
    """Unpack one retrieval hit into (input text, 定额依据, 定额名, 定额库) lists."""
    query_text = json.loads(res.fields['input'])
    options = json.loads(res.fields['output'])
    mods, standards, dekus = [], [], []
    for option in options:
        mods.append(option['定额依据'])
        standards.append(option['定额名'])
        dekus.append(option['定额库'])
    return query_text, mods, standards, dekus


def build_few_shot_examples(
        results: List[Tuple[str, List[str], List[str], List[str]]],
        shots_num: int
) -> Tuple[str, str, List[str], str]:
    """Assemble few-shot prompt sections from retrieved examples.

    Returns (io_str, i_str, a1_results, sdl): the numbered input-output and
    input-only example sections, the stringified answers, and a bullet list
    of all unique standard names seen.
    """
    seen_standards = set()
    io_shots: List[str] = []
    i_shots: List[str] = []
    a1_results: List[str] = []

    for input_text, mod_list, standard_list, dekus_list in results:
        if len(io_shots) >= shots_num:
            break
        answer = str({
            '定额库名称': dekus_list,
            '定额标准名称': standard_list,
            '人工修改名称': mod_list
        })
        seen_standards.update(standard_list)

        candidate = IO_FEW_SHOTS.format(text=input_text, res_lst=answer)
        if candidate in io_shots:
            continue  # skip duplicate examples
        io_shots.append(candidate)
        i_shots.append(I_FEW_SHOTS.format(text=input_text))
        a1_results.append(answer)

    sdl = "\n".join(f"    - {name}" for name in seen_standards)
    io_str = "\n".join(f"## 示例{idx + 1}：{shot}" for idx, shot in enumerate(io_shots))
    i_str = "\n".join(f"## 清单{idx + 1}：{shot}" for idx, shot in enumerate(i_shots))
    return io_str, i_str, a1_results, sdl


def get_prompts(
        search_results: List[List], u2_queries: List[str], shots_num: int = 3
) -> Tuple[List[str], List[str], List[str]]:
    """Build the stage-1 chain prompts plus stage-2 queries/answers per query."""
    chain_prompts: List[str] = []
    u1_queries: List[str] = []
    a1_results: List[str] = []
    for topk, query in zip(search_results, u2_queries):
        hits = [extract_fields(hit) for hit in topk]
        io_str, i_str, res_list, sdl = build_few_shot_examples(hits, shots_num)

        # Queries mentioning 不计 / 另计 get the special "不计" prompt template.
        needs_buji = ("不计" in query) or ("另计" in query)
        template = STAGE1_PROMPT_WITH_BUJI if needs_buji else STAGE1_PROMPT
        chain_prompts.append(
            template.format(io_shots=io_str, query=query, shots_num=shots_num, sdl=sdl)
        )
        u1_queries.append(STAGE2_1.format(i_shots=i_str, opt_format=OPT_FORMAT))
        a1_results.append(str(res_list))
    return chain_prompts, u1_queries, a1_results


def load_qd_dataframe(filepath: Path, sheet_name: int = 1) -> pd.DataFrame:
    """Load the 清单 sheet from an Excel workbook, indexed by 清单编号.

    清单编号 is forced to str to preserve leading zeros; NaN cells become ''.
    """
    frame = pd.read_excel(filepath, sheet_name=sheet_name, dtype={'清单编号': str})
    return frame.set_index('清单编号').fillna('')


def prepare_queries(qd_list, qd_df):
    """Build retrieval queries, LLM query shots, answer heads, and warnings.

    Returns (retl_queries, llm_queries, answer_heads, msgs_dict), where
    msgs_dict maps QDID -> warning message produced by the 清单名 lookup.
    """
    retl_queries = []
    llm_queries = []
    answer_heads = []
    msgs_dict = {}
    for entry in qd_list:
        domain = entry['ZhuanYe']
        qd_code = entry['QDKu'] + '-' + entry['BianHao']
        qd_id = entry['QDID']
        qd_name, msg = get_qd_name(qd_id, qd_code, qd_df, entry['MingCheng'])
        if msg:
            msgs_dict[qd_id] = msg
        query_json = {
            '清单专业': domain,
            '清单名': qd_name,
            '清单特征': entry['TeZheng'],
            '清单单位': entry['DanWei']
        }
        shot = f"## 查询清单:\n```json\n{query_json}\n```"
        answer_heads.append(LLM_ANSWER_HEAD.format(BZ='BZ', query=shot))
        retl_queries.append(
            f"专业：{domain}\n名称：{qd_name}\n特征：\n{entry['TeZheng']}\n单位：{entry['DanWei']}"
        )
        llm_queries.append(shot)
    return retl_queries, llm_queries, answer_heads, msgs_dict


def parse_out(predict_data, batch_answers, closest_results, project_id):
    """Merge LLM answers with sqlite lookups into per-清单 result dicts.

    For each item, parse the LLM output for the predicted 定额标准名称 list
    and complete it from the quota table named after *project_id*; when
    parsing fails (or yields no names), fall back to the nearest retrieved
    example and record a message keyed by QDID.

    Args:
        predict_data: list of request items, each with a 'QDID' key.
        batch_answers: raw LLM answer string per item.
        closest_results: fallback 'LstDE' list per item.
        project_id: quota table name in the local sqlite DB.

    Returns:
        (results, msg_dict): list of {'QDID', 'LstDE'} dicts and the
        QDID -> failure-message mapping.
    """
    results = []
    msg_dict = {}

    # 从定额数据库中补全信息 (complete quota info from the local DB).
    conn = sqlite3.connect(PROJECT_ROOT / 'db_data' / 'sqlite_data' / 'quota.db')
    try:
        cursor = conn.cursor()
        # BUG FIX: the loop variable used to shadow the `predict_data`
        # parameter; renamed to `item`.
        for item, llm_result, closest_result in zip(predict_data, batch_answers, closest_results):
            qd_id = item['QDID']
            out_data = {'QDID': qd_id}
            contents, except_info = extract_json_with_key(llm_result)
            # Guard against an empty name list: "IN ()" is a SQL syntax error.
            if contents and contents[-1]['定额标准名称']:
                ming_cheng = contents[-1]['定额标准名称']
                placeholders = ', '.join(['?'] * len(ming_cheng))  # "?, ?, ?"
                # NOTE: a table name cannot be bound as a parameter, so
                # project_id is interpolated directly — callers must ensure
                # it is trusted (it doubles as the Milvus collection name).
                sql = (
                    f"SELECT KuId, BianHao, DanWei, MingCheng FROM {project_id} "
                    f"WHERE MingCheng IN ({placeholders})"
                )
                cursor.execute(sql, ming_cheng)
                out_data['LstDE'] = [
                    {"DEKu": row[0], "BianHao": row[1], "DanWei": row[2], "MingCheng": row[3]}
                    for row in cursor.fetchall()
                ]
            else:  # 定额LLM预测失败，找最近似的匹配 (LLM parse failed: use closest match)
                out_data['LstDE'] = closest_result
                msg_dict[qd_id] = (
                    f"“QDID：{qd_id}” 的LLM 结果解析失败。当前仅返回了知识库中与预测清单最相近样例的定额清单。"
                    f"LLM 的返回内容为：{llm_result}"
                )
            results.append(out_data)
        cursor.close()
    finally:
        # BUG FIX: the connection previously leaked when a lookup raised.
        conn.close()
    return results, msg_dict


class PredictRequest(BaseModel):
    """Request body for the POST "/" prediction endpoint."""

    # 清单 items to predict; each dict needs the keys read by prepare_queries
    # (QDID, ZhuanYe, QDKu, BianHao, MingCheng, TeZheng, DanWei).
    predict_data: List[Dict]
    # Used both as the Milvus collection name and the sqlite quota table name.
    project_id: str
    # Model name forwarded to the OpenAI-compatible gateway.
    public_model_name: str
    # Pagination; page_size defaults to "everything on one page".
    page_size: Optional[int] = None
    page: Optional[int] = 1
    # Few-shot example count per prompt.
    shot_num: Optional[int] = 3
    part_name: Optional[List[str]] = None
    # OpenAI-compatible gateway location and credentials.
    base_url: Optional[str] = 'http://localhost:4000'
    api_key: Optional[str] = "sk-1234"
    # BUG FIX: the default was a pathlib.Path while the field is declared
    # Optional[str]; coerce explicitly so validation behaves consistently
    # across pydantic versions.
    dir_save: Optional[str] = str(PROJECT_ROOT / 'log_dir')


def config_logger(dir_save):
    """Route root logging to a daily-rotated file under *dir_save*.

    Keeps seven days of history; format includes timestamp, level and file.
    """
    log_path = os.path.join(dir_save, 'run.log')
    rotating_handler = TimedRotatingFileHandler(
        filename=log_path,
        when='D',  # rotate once per day
        interval=1,
        backupCount=7,  # retain one week of logs
        encoding='utf-8'
    )
    logging.basicConfig(
        handlers=[rotating_handler],
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(filename)s - %(message)s'
    )


async def ask_llm(prompt, client, public_model_name):
    """Send one user prompt to the chat endpoint; return text or "[ERROR] ..."."""
    messages = [{"role": "user", "content": prompt}]
    try:
        response = await client.chat.completions.create(
            model=public_model_name,
            messages=messages,
            stream=False,
            extra_body={"enable_thinking": False}
        )
    except Exception as e:
        # Fold errors into the answer string so a single bad request does
        # not abort the whole gathered batch.
        return f"[ERROR] {e}"
    return response.choices[0].message.content


async def generate_answers(prompts, client, public_model_name):
    """Fan all prompts out concurrently; answers come back in prompt order."""
    return await asyncio.gather(
        *(ask_llm(prompt, client, public_model_name) for prompt in prompts)
    )


@router.post("/")
async def predict(req: PredictRequest):
    """Predict 定额 entries for a batch of 清单 items, with file-backed caching.

    Results are cached per (project_id, uuid5 of the serialized predict_data);
    a cache hit skips retrieval and LLM calls entirely. The response is
    paginated via page/page_size and carries lookup / parse warnings in 'msg'.

    Returns {'res': ..., 'code': 0|1, 'msg': ...}; code 1 means failure and
    'res' carries the error text.
    """
    try:
        # config_logger(req.dir_save)
        # Deterministic cache key for this exact input batch.
        uuid_str = str(uuid.uuid5(uuid.NAMESPACE_DNS, json.dumps(req.predict_data)))
        cache_path = PROJECT_ROOT / 'cache' / 'predict_cache.json'
        # BUG FIX: a missing or corrupt cache file used to fail the whole
        # request; treat it as an empty cache instead.
        try:
            with open(cache_path, 'r', encoding='utf-8') as f:
                cache_data = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            cache_data = {}
        cached = cache_data.get(req.project_id, {}).get(uuid_str)
        if cached is None:
            llm_client = AsyncOpenAI(api_key=req.api_key, base_url=req.base_url)
            milvus_op = MilvusOP(db_name='QD_db', collection_name=req.project_id)
            qd_df = pd.read_pickle(PROJECT_ROOT / 'quota_df' / 'data.pkl')  # load the retained 清单 library
            retl_queries, llm_queries, answer_heads, qd_not_found_msgs = prepare_queries(req.predict_data, qd_df)
            batch_domains = list(set(i['ZhuanYe'] for i in req.predict_data))
            retrieval_shots = milvus_op.hybrid_search(
                retl_queries, limit=req.shot_num, config=retrieval_config, expr=batch_domains
            )
            # Fallback candidates: the nearest neighbour's quota list per query.
            closest_results = [json.loads(shot[0].fields['raw'])['LstDE'] for shot in retrieval_shots]
            batch_prompts, _, _ = get_prompts(retrieval_shots, llm_queries, req.shot_num)
            batch_answers = await generate_answers(batch_prompts, llm_client, req.public_model_name)
            results, fail_parsed_msgs = parse_out(req.predict_data, batch_answers, closest_results, req.project_id)
            total = len(results)
            # BUG FIX: merge the new entry into the loaded cache instead of
            # replacing the whole file, which silently dropped every other
            # project's / batch's cached results.
            cache_data.setdefault(req.project_id, {})[uuid_str] = {
                'total': total,
                'result': results,
                'fail_parsed_msg': fail_parsed_msgs,
                'qd_not_found_msg': qd_not_found_msgs,
            }
            with open(cache_path, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)
        else:
            # NOTE(review): after a JSON round-trip these msg dicts have str
            # keys; if QDID is an int upstream, the membership checks below
            # miss on cache hits — confirm QDID's type with the callers.
            total = cached['total']
            results = cached['result']
            fail_parsed_msgs = cached['fail_parsed_msg']
            qd_not_found_msgs = cached['qd_not_found_msg']
        if not req.page_size:
            req.page_size = total  # default: everything on a single page
        start, end = get_batche_index(total, req.page_size)[req.page - 1]
        page_results, warn_msgs = results[start:end], []
        for item in page_results:
            qd_id = item['QDID']
            if qd_id in qd_not_found_msgs:
                warn_msgs.append(qd_not_found_msgs[qd_id])
            if qd_id in fail_parsed_msgs:
                warn_msgs.append(fail_parsed_msgs[qd_id])
        batch_results = {'total': total, 'data': page_results}
    except Exception as e:
        # Top-level guard: surface any failure as a code-1 payload.
        return {'res': str(e), "code": 1, 'msg': 'fail'}
    return {'res': batch_results, "code": 0, 'msg': '\n'.join(warn_msgs)}
