# coding=utf-8
from typing import List
from utils.functions import pretty_print_dict
from collections import OrderedDict
from pprint import pprint
import aiomysql
from utils.utils import read_json_file, remove_json_prefix_suffix
from prompts import prompts
from chains.C5_AI_answer import AI_Answer
import json
from utils.logger import logger

async def calculate_rule_match_score(ai_client: AI_Answer, content_text: str, category_ref: str, rule_list: List[str], batch_size: int = 16, token_usage: dict = None):
    """
    Score how well the material text matches each rule, querying the LLM in batches.

    :param ai_client: LLM client used for answering and token counting.
    :param content_text: Material text to score against the rules.
    :param category_ref: Category reference interpolated into the scoring prompt.
    :param rule_list: Rules to score; result keys are their stringified indices.
    :param batch_size: Number of rules sent to the model per request.
    :param token_usage: Optional in/out dict accumulating token counters
        ('prompt_tokens', 'reasoning_tokens', 'completion_tokens', 'total_tokens').
        Only updated when passed in non-empty (truthy), matching the original contract.
    :return: Dict of {rule_index_str: score_float}, sorted by score descending.
    """
    # Avoid a shared mutable default argument.
    if token_usage is None:
        token_usage = {}
    rule_items = [(str(index), rule) for index, rule in enumerate(rule_list)]
    match_score_result = {}
    for start in range(0, len(rule_items), batch_size):
        rules_batch_dict = dict(rule_items[start : start + batch_size])
        batch_prompt = prompts.CALCULATE_RULE_MATCH_SCORE.format(
            material=content_text, category_ref=category_ref, rules_dict=rules_batch_dict
        )
        # Track token consumption of the scoring prompt.
        # Use .get(key, 0): the original .get(key) returned None for missing keys
        # and crashed with TypeError on the addition.
        if token_usage:
            token_usage['prompt_tokens'] = token_usage.get('prompt_tokens', 0) + ai_client.count_tokens(batch_prompt)
        ai_text_think = ""
        ai_text = ""
        async for text in ai_client.get_ai_answer_R1(batch_prompt, output_json=True):
            text_type = json.loads(text)
            if text_type['type'] == 'thinking':
                ai_text_think += text_type['content']
            else:
                ai_text += text_type['content']
        # Track token consumption of the model's reasoning and final answer.
        if token_usage:
            token_usage['reasoning_tokens'] = token_usage.get('reasoning_tokens', 0) + ai_client.count_tokens(ai_text_think)
            token_usage['completion_tokens'] = token_usage.get('completion_tokens', 0) + ai_client.count_tokens(ai_text)
            token_usage['total_tokens'] = (token_usage.get('prompt_tokens', 0)
                                           + token_usage.get('reasoning_tokens', 0)
                                           + token_usage.get('completion_tokens', 0))
        try:
            batch_result_dict = remove_json_prefix_suffix(ai_text)
        except Exception as e:
            logger.error(f'解析分数出错: {e}, 输入文本: {ai_text}')
            batch_result_dict = {}
        match_score_result.update(batch_result_dict)
    # Normalize scores to float and sort once, after all batches.
    # BUG FIX: the original sorted inside the loop and returned `sorted_res`,
    # which was unbound (NameError) whenever rule_list was empty.
    match_score_result = {k: float(v) for k, v in match_score_result.items()}
    return dict(sorted(match_score_result.items(), key=lambda x: x[1], reverse=True))

async def get_material_category(ai_client: AI_Answer, content_text: str, category_path: str, token_usage: dict = None):
    """
    Classify the material text into one or more categories via the LLM.

    :param ai_client: LLM client used for answering and token counting.
    :param content_text: Material text to classify.
    :param category_path: Path to the JSON file describing the category rules.
    :param token_usage: Optional in/out dict accumulating token counters;
        only updated when passed in non-empty (truthy), matching the original contract.
    :return: List of category names parsed from the model's comma-separated answer.
    """
    # Avoid a shared mutable default argument.
    if token_usage is None:
        token_usage = {}
    category_json = read_json_file(category_path)
    category_prompt = prompts.LLM_MATERIAL_TO_CATEGORY_PROMPT.format(material=content_text, rules=category_json)
    ai_text_think = ""
    ai_text = ""
    # Use .get(key, 0): the original .get(key) returned None for missing keys
    # and crashed with TypeError on the addition.
    if token_usage:
        token_usage['prompt_tokens'] = token_usage.get('prompt_tokens', 0) + ai_client.count_tokens(category_prompt)

    async for text in ai_client.get_ai_answer_R1(category_prompt):
        text_type = json.loads(text)
        if text_type['type'] == 'thinking':
            ai_text_think += text_type['content']
        else:
            ai_text += text_type['content']
    if token_usage:
        token_usage['reasoning_tokens'] = token_usage.get('reasoning_tokens', 0) + ai_client.count_tokens(ai_text_think)
        token_usage['completion_tokens'] = token_usage.get('completion_tokens', 0) + ai_client.count_tokens(ai_text)
        token_usage['total_tokens'] = (token_usage.get('prompt_tokens', 0)
                                       + token_usage.get('reasoning_tokens', 0)
                                       + token_usage.get('completion_tokens', 0))

    # Use the module logger instead of bare print(), consistent with the rest of the file.
    logger.info(f'{content_text}类型为：{ai_text}')
    category_list = [c.strip() for c in ai_text.strip().split(",")]
    logger.info(f'{category_list}')
    return category_list


async def get_prompt(ai_client: AI_Answer, content_text: str, category_list: List[str], rule_path: str, simple: bool = True, token_usage: dict = None):
    """
    Build the final inquiry prompt from the rules that best match the material.

    v1: found similar historical inquiries and asked the model to question from them.
    v2: no historical inquiries needed; questions are generated from inquiry rules.

    :param ai_client: LLM client used for rule scoring.
    :param content_text: Material text.
    :param category_list: Candidate category names; names absent from the rule
        file are silently dropped.
    :param rule_path: Path to the JSON file mapping category -> list of rules.
    :param simple: When True, only the first category and its first 100 rules are scored.
    :param token_usage: Optional in/out dict accumulating token counters,
        forwarded to calculate_rule_match_score.
    :return: Formatted inquiry prompt string.
    """
    # Avoid a shared mutable default argument.
    if token_usage is None:
        token_usage = {}
    rules = read_json_file(rule_path)
    match_rules = {}
    # Guard against generated categories that do not exist in the rule file.
    category_list = [c.strip() for c in category_list if c.strip() in rules]
    # Simple mode: only evaluate the first category.
    if simple:
        category_list = category_list[:1]
    for category in category_list:
        rule_list = rules.get(category)
        if simple:
            rule_list = rule_list[:100]
        match_scores = await calculate_rule_match_score(ai_client, content_text, category, rule_list, batch_size=16, token_usage=token_usage)
        # Keep only the 10 best-scoring rules so the prompt stays bounded.
        top_10_match_scores = dict(sorted(match_scores.items(), key=lambda x: x[1], reverse=True)[:10])
        logger.info(f'当前类型: {category}, top 10 规则匹配分数: {top_10_match_scores}, 输入文本: {content_text}')
        # Materialize the top rules with their scores for the prompt template.
        match_rules[category] = [
            {"rule": rule_list[int(rule_index)], "score": score}
            for rule_index, score in top_10_match_scores.items()
        ]
    logger.info(f'所有类型: {category_list}, 规则匹配分数: {match_rules}, 输入文本: {content_text}')
    inquiry_prompt = prompts.LLM_INQUIRY_BY_RULES_PROMPT.format(material=content_text, match_rules=match_rules)
    return inquiry_prompt


async def test_get_prompt():
    """Manual smoke test for get_prompt using a sample disclosure paragraph."""
    content_text = '发行人与齐齐哈尔博实畜牧服务有限公司就支持和牛养殖事宜签署了《支持和牛养殖协议》，约定发行人为齐齐哈尔博实畜牧服务有限公司提供6,000万元额度的专项资金支持，用于和牛养殖服务；齐齐哈尔博实畜牧服务有限公司以其拥有的牛只对上述专项资金支持额度提供抵押担保，齐齐哈尔博实畜牧服务有限公司的直接股东彭辉及关联方龙江县彭辉肉牛养殖场、龙江黑牛牧业有限公司梅里斯分公司提供连带责任保证。'
    # NOTE(review): this call does not match get_prompt's current signature
    # (ai_client, content_text, category_list, rule_path, ...): it passes the
    # material text where ai_client is expected and omits category_list, so it
    # raises TypeError at call time. Looks like a stale call kept from an older
    # signature — confirm and pass a real AI_Answer client plus a category list.
    await get_prompt(content_text, '../data/rules_sample.json', '../data/rules_query_merged.json')

if __name__ == '__main__':
    # Script entry point: drive the async smoke test to completion.
    from asyncio import run

    run(test_get_prompt())

# if __name__ == '__main__':
#     import asyncio
#
#     # content_text = '招股书文本'
#
#     content_text = '发行人与齐齐哈尔博实畜牧服务有限公司就支持和牛养殖事宜签署了《支持和牛养殖协议》，约定发行人为齐齐哈尔博实畜牧服务有限公司提供6,000万元额度的专项资金支持，用于和牛养殖服务；齐齐哈尔博实畜牧服务有限公司以其拥有的牛只对上述专项资金支持额度提供抵押担保，齐齐哈尔博实畜牧服务有限公司的直接股东彭辉及关联方龙江县彭辉肉牛养殖场、龙江黑牛牧业有限公司梅里斯分公司提供连带责任保证。'
#     problem_id_list = [297, 401, 807]
#     prompt = asyncio.run(get_prompt(content_text, problem_id_list))
#     print(prompt)
