# coding=utf-8
import os
import sys
import random

# Add the project root directory to the Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from chains.C2_recall_material import recall_material_list, recall_material_distances_db
from chains.C3_material_problem import Material2Problem
from chains.C4_get_prompt_v2 import get_prompt, get_material_category
from chains.C5_AI_answer import AI_Answer
from chains.C20_get_prompt_third import get_prompt_third_db
from chains.C15_bubble_title import Bubble
from prompts.C24_prompt import prompt_24
from config.config import config
from collections import OrderedDict
from utils.functions import Get_Dict
from datetime import datetime
from utils.db_pool import LawDB
from openai import OpenAI, AsyncOpenAI
import time
import json
from chains import C4_get_prompt_v2
import asyncio
from utils.utils import remove_json_prefix_suffix
from utils.logger import logger

# 初始化大模型类
ai_client = AI_Answer()

class Overall_Bubble_V2:
    """End-to-end inquiry pipeline for a prospectus document.

    Driven by :meth:`overall_bubble`:
      1. Score every slice with the R1 model (:meth:`R1_score`).
      2. Keep slices scoring >= 8 and classify them (:meth:`classify_content_text`).
      3. De-duplicate by category via sampling (:meth:`context_sample`).
      4. Recall similar reference materials per slice (:meth:`C2_distance`).
      5. Generate and persist inquiry bubbles (:meth:`bubbles_generated`).
    """

    async def C2_distance(self, db: LawDB, content_text_dict: dict, sector: str, industry: str, region: str):
        """Run material recall for every slice concurrently.

        :param db: database pool wrapper
        :param content_text_dict: {slice_index: slice_text}
        :return: merged dict {slice_index: {'min_distance': ..., 'bubble_dict': ...}};
                 slices whose recall failed or returned nothing are absent.
        """
        tasks = set()
        for index, content_text in content_text_dict.items():
            distances_bubble_task = asyncio.create_task(
                self.single_slice_distances(db, content_text, index, sector, industry, region))
            tasks.add(distances_bubble_task)
            # Drop finished tasks from the bookkeeping set. gather() below holds
            # its own references, so no result is lost by this.
            distances_bubble_task.add_done_callback(tasks.discard)
        # Wait for all recall tasks to finish.
        results = await asyncio.gather(*tasks)
        distances_ordered_dict = {}
        for result in results:
            distances_ordered_dict.update(result)
        return distances_ordered_dict

    # Recall materials for one slice (results are persisted to the DB downstream).
    async def single_slice_distances(self, db: LawDB, content_text: str, ordered_index: int, sector: str, industry: str, region: str):
        """Classify one slice, recall its 3 closest materials and build the
        bubble payload.

        :return: {ordered_index: {'min_distance': float, 'bubble_dict': dict}},
                 or {} when recall fails or yields nothing.
        :raises ValueError: when the recalled material entries cannot be parsed.
        """
        time_0 = time.time()
        Title = Bubble()
        problem_type_list = await Title.title_bubble_v3(content_text, sector)
        time_1 = time.time()

        logger.info(f'调用 C15 获取问题分类标签 时间：{time_1 - time_0}')

        try:
            await db.init_pool()
            distances_list = await recall_material_distances_db(db.pool, content_text, 3, sector, industry, region, problem_type_list)

            if len(distances_list) == 0:
                logger.warning(f'C2 列表长度=0，返回空字典: {distances_list}')
                return {}
            elif isinstance(distances_list, list):
                pass
            else:
                logger.error(f'type(distances_list): {type(distances_list)}')
                logger.error(f'C2 输出是字符串不是列表 distances_list: {distances_list}')
                return {}
        except Exception as e:
            logger.error(f'C2 输出是字符串不是列表返回空字典: {e}', exc_info=True)
            return {}

        time_2 = time.time()
        logger.info(f'调用 C2 获得材料详情列表 时间：{time_2 - time_1}')

        material_id_list = []
        material_distances_list = []
        material_content_list = []

        try:
            # Each element appears to be {material_id: {'材料距离': ..., '材料内容': ...}}
            # with a single key — TODO confirm against recall_material_distances_db.
            for material_dict in distances_list:
                logger.debug(f'material_dict: {material_dict}')
                material_id_list.append(list(material_dict.keys())[0])
                material_distances_list.append(list(material_dict.values())[0]['材料距离'])
                material_content_list.append(list(material_dict.values())[0]['材料内容'])
            # .item(): distances are presumably numpy scalars — TODO confirm.
            min_distance = min(material_distances_list).item()
            logger.info(f'最小距离：{min_distance}')
        except Exception as e:
            raise ValueError(f"distances_list_type:{type(distances_list)};distances_list: {distances_list};content_text: {content_text};problem_type_list: {problem_type_list};exception: {e}")

        bubble_dict = {}
        bubble_dict['contentText'] = content_text
        bubble_dict['bubbleTitle'] = problem_type_list
        bubble_dict['materials'] = material_content_list
        bubble_dict['material_id_list'] = material_id_list


        return {ordered_index: {'min_distance': min_distance, 'bubble_dict': bubble_dict}}


    async def R1_score(self, content_text_dict: dict, token_usage: dict):
        """Score prospectus slices with the R1 model.

        :param content_text_dict: prospectus slices to score, {index: text}
        :param token_usage: running token-usage counters (mutated in place)
        :return: {index(str): score}, ordered by score descending
        """
        query_message = prompt_24(content_text_dict)
        # Count the prompt tokens and update token_usage['prompt_tokens'].
        token_usage['prompt_tokens'] = token_usage.get('prompt_tokens') + ai_client.count_tokens(query_message)
        logger.info(f'query_message_token_usage: {ai_client.count_tokens(query_message)}')
        think_text = ''
        answer_text = ''
        async for chunk in ai_client.get_ai_answer_R1(query_message, output_json=True):
            chunk_json = json.loads(chunk)
            if chunk_json['type'] == 'thinking':
                think_text += chunk_json['content']
            else:
                answer_text += chunk_json['content']
        logger.info(f'招股说明书切片的评分结果为：{answer_text}')

        # Update token consumption.
        token_usage['reasoning_tokens'] = token_usage.get('reasoning_tokens') + ai_client.count_tokens(think_text)
        token_usage['completion_tokens'] = token_usage.get('completion_tokens') + ai_client.count_tokens(answer_text)
        token_usage['total_tokens'] = token_usage.get('prompt_tokens') + token_usage.get('reasoning_tokens') + token_usage.get('completion_tokens')

        # SECURITY: eval() on model-derived text is dangerous; switch to
        # ast.literal_eval if Get_Dict is guaranteed to return a dict literal.
        score_dict = eval(Get_Dict(answer_text))
        # Sort by score, descending (OrderedDict keeps the order explicit).
        ordered_dict = OrderedDict(sorted(score_dict.items(), key=lambda item: item[1], reverse=True))
        # Normalize the keys to strings (note: the original comment claimed an
        # int conversion; the code has always produced string keys).
        R1_dict = {str(key): value for key, value in ordered_dict.items()}
        logger.info(f'R1_dict: {R1_dict}')
        return R1_dict


    async def single_bubble(self, db: LawDB, review_index: int, value: tuple,
                            fileId: str, content_slice_dict: dict, progress: dict, token_usage: dict):
        """Generate one inquiry bubble for a sampled slice and persist it.

        :param review_index: slice index (looked up as str in content_slice_dict)
        :param value: ('cat1,cat2,...', score) tuple for this slice
        :param content_slice_dict: {str(index): slice_text}
        :param progress: progress counters (mutated and written to the DB)
        :param token_usage: token counters (mutated and written to the DB)
        """
        content_text = content_slice_dict[str(review_index)]
        bubble_dict = {}
        # FIX: pre-initialize so `bubble` is defined even when the DB lookup
        # raises; previously the logger.debug below hit UnboundLocalError.
        bubble = None
        try:
            bubble = await db.get_history_overall_bubble(fileId, review_index)
        except Exception as e:
            logger.error(f'获取 bubbles 出错， 删除切片气泡: {e}', exc_info=True)
            await db.delete_history_overall_bubble(fileId, review_index)
        # Rows were all inserted earlier, so an empty result simply means no
        # recall data was stored for this slice.
        logger.debug(f'bubble type: {type(bubble)}')
        if bubble:
            bubble_dict = bubble
        # Look up materials similar to this slice.
        material_id_list = None
        if bubble_dict:
            material_id_list = bubble_dict.get('material_id_list', None)
        Material = Material2Problem()

        problem_id_list = None
        if material_id_list:
            problem_id_list = await Material.material_problem_db(db.pool, material_id_list)

        # Recall the matching prompt from the inquiry rules.
        rules_path = os.path.join(config.DATA_DIR, 'rules_query_merged.json')

        time_4 = time.time()
        AI = AI_Answer()
        aiText = ''
        aiText_think = ''

        category_list = [c.strip() for c in value[0].strip().split(",")]
        # Token usage during rule-matching/scoring is accumulated inside get_prompt.
        aiText_prompt = await get_prompt(AI, content_text, category_list, rules_path, simple=False, token_usage=token_usage)

        await db.update_task_token_usage(progress['task_id'], json.dumps(token_usage))

        logger.info(f'aiText_prompt: {aiText_prompt}')

        token_usage['prompt_tokens'] = token_usage.get('prompt_tokens') + ai_client.count_tokens(aiText_prompt)

        async for Text in AI.get_ai_answer_R1(aiText_prompt, output_json=True):
            Text_Type = json.loads(Text)
            if Text_Type['type'] == 'thinking':
                aiText_think += Text_Type['content']
            else:
                aiText += Text_Type['content']
        token_usage['reasoning_tokens'] = token_usage.get('reasoning_tokens') + ai_client.count_tokens(aiText_think)
        token_usage['completion_tokens'] = token_usage.get('completion_tokens') + ai_client.count_tokens(aiText)
        token_usage['total_tokens'] = token_usage.get('prompt_tokens') + token_usage.get('reasoning_tokens') + token_usage.get('completion_tokens')
        await db.update_task_token_usage(progress['task_id'], json.dumps(token_usage))

        logger.info(f'review_index: {review_index}, aiText: {aiText}\n')
        try:
            # remove_json_prefix_suffix presumably parses the model output into a
            # dict with 'rules_ref' and 'inquiry' keys — TODO confirm.
            aiText = remove_json_prefix_suffix(aiText)
        except Exception as e:
            logger.error(f'解析 aiText 出错， review_index: {review_index}, aiText: {aiText}, 错误信息：{e}', exc_info=True)
            aiText = {"rules_ref" : [], "inquiry": ""}

        time_5 = time.time()
        logger.info(f'调用 C4 获取 发行人 回答 时间：{time_5 - time_4}')

        bubble_dict['aiText'] = aiText['inquiry']
        bubble_dict['aiText_think'] = aiText_think
        bubble_dict['rule_ref'] = aiText['rules_ref']
        bubble_dict['useFlag'] = False
        bubble_dict['xialaCut'] = False
        bubble_dict['aiBtnFlag'] = False
        bubble_dict['cutBtnFlag'] = 2
        bubble_dict['sliceIndex'] = review_index
        bubble_dict['bubbleScore'] = value[1]
        bubble_dict['contentText'] = content_text
        bubble_dict['domElementCur'] = None
        bubble_dict['problem_id_list'] = problem_id_list


        await db.init_pool()
        aiTextThird_prompt = await get_prompt_third_db(db.pool, content_text, problem_id_list, aiText)
        aiTextThird = ''
        aiTextThird_think = ''
        async for Text in AI.get_ai_answer_R1(aiTextThird_prompt):
            Text_Type = json.loads(Text)
            if Text_Type['type'] == 'thinking':
                aiTextThird_think += Text_Type['content']
            else:
                aiTextThird += Text_Type['content']
        time_6 = time.time()
        logger.info(f'调用 C5 获取 第三方 回答 时间：{time_6 - time_5}')

        bubble_dict['aiTextThird'] = aiTextThird
        bubble_dict['aiTextThird_think'] = aiTextThird_think

        await db.update_history_overall(fileId=fileId, bubble=bubble_dict, status_code=0,
                                        slice_index=int(review_index),
                                        bubble_num=None)

        # Update the progress counters.
        progress['have_finished'] = progress['have_finished'] + 1
        await db.update_task_progress(progress['task_id'], progress['total_inquiry'], progress['have_finished'])

        # Update the status table.
        logger.debug(f'bubble_dict: {bubble_dict}')
        logger.info('单气泡产生完成')



    async def bubbles_generated(self, sample_dict: dict, C2_dict: dict, db: LawDB, user_id: int, file_name: str, fileId: str, content_slice_dict: dict, progress: dict, token_usage: dict):
        """Insert placeholder rows for every sampled slice, then generate all
        bubbles concurrently.

        :param sample_dict: sampling result, {index: ('cat1,cat2,cat3', score)}
        :param C2_dict: similar-material recall result from C2_distance
        :param db: database pool wrapper
        :param user_id: owner of the task
        :param file_name: display name of the document
        :param fileId: document identifier
        :param content_slice_dict: {str(index): slice_text}
        :return: None
        """
        # Sort the sample by slice index.
        sample_dict = dict(sorted(sample_dict.items(), key=lambda item: item[0]))
        logger.info(f'排序后的采样：{sample_dict}')
        logger.info(f'相似问询查询结果：{C2_dict}')

        insert_history_overall_tasks = set()
        for index, value in sample_dict.items():
            C2_distance = C2_dict.get(str(index))
            if C2_distance:
                bubble = C2_distance['bubble_dict']
                distance = C2_distance['min_distance']
            else:
                bubble = {}
                distance = None

            if  isinstance(bubble, dict):
                bubble['category'] = value[0]
            insert_task = asyncio.create_task(db.insert_history_overall(user_id=user_id, file_name=file_name, bubble=bubble, fileId=fileId,
                                            status_code=0, slice_index=int(index), score=value[1], distance=distance))
            insert_history_overall_tasks.add(insert_task)
            insert_task.add_done_callback(insert_history_overall_tasks.discard)
        # All placeholder rows go into the database first.
        await asyncio.gather(*insert_history_overall_tasks)
        all_bubble_tasks = set()
        for index, value in sample_dict.items():
            single_bubble_task = asyncio.create_task(self.single_bubble(db, index, value, fileId, content_slice_dict, progress, token_usage))
            all_bubble_tasks.add(single_bubble_task)
            single_bubble_task.add_done_callback(all_bubble_tasks.discard)
        await asyncio.gather(*all_bubble_tasks)



    # Classify each context slice so duplicates can be removed per category.
    async def classify_content_text(self, content_text_dict: dict, token_usage: dict):
        """Classify every slice concurrently.

        :param content_text_dict: {index: slice_text}
        :param token_usage: token counters (mutated by get_material_category)
        :return: OrderedDict {str(index): 'cat1,cat2'}
        """
        category_path = os.path.join(config.DATA_DIR, 'rules_sample.json')
        tasks = set()
        index_list = []
        for index, content_text in content_text_dict.items():
            classification_task = asyncio.create_task(C4_get_prompt_v2.get_material_category(ai_client, content_text, category_path, token_usage))
            tasks.add(classification_task)
            index_list.append(str(index))
            classification_task.add_done_callback(tasks.discard)
        result_list = await asyncio.gather(*tasks)
        # gather preserves submission order, so indices zip with their results.
        category_dict = OrderedDict()
        for index, result in zip(index_list, result_list):
            category_dict[index] = ','.join(result)
        return category_dict


    def context_sample(self, category_dict: dict, inquiry_score_dict: dict, n_sample: int):
        """Sample up to roughly n_sample slices, spreading picks across categories.

        :param category_dict: categories per slice: {index: 'cat1, cat2'}
        :param inquiry_score_dict: inquiry scores: {index: score}
        :param n_sample: target sample size
        :return: {index: ('cat1,cat2,cat3', score)}

        Algorithm: build a category -> [(index, score)] map sorted by score,
        then round-robin over categories taking the best unseen slice from each.
        """
        assert len(category_dict) == len(inquiry_score_dict)

        result = {}
        # Fewer slices than the sample size: return everything, no dedup needed.
        if len(inquiry_score_dict) <= n_sample:
            for index, score in inquiry_score_dict.items():
                result[index] = (category_dict[index], score)
            return result

        sample_map = {}
        for index, category_list in category_dict.items():
            category_list_split = category_list.split(',')
            for category in category_list_split:
                if category not in sample_map:
                    sample_map[category] = [(index, inquiry_score_dict[index])]
                else:
                    sample_map[category].append((index, inquiry_score_dict[index]))
        logger.info(f'sample_map:\n{sample_map}')
        # Sort each category's slices by score, descending.
        # sample_map : {'c1':[(index1, score1), (index2, score2)]}
        sample_map = {key : sorted(value, key=lambda x: x[1], reverse=True) for key, value in sample_map.items()}
        # Round-robin sampling.
        count = 0 # number of slices sampled so far
        index = 0 # rank currently taken from each category
        step = 0 # iteration counter
        running = True
        oversize = random.randint(0, 10) # allow 0-10 extra picks of slack
        while running:
            for key, value in sample_map.items():
                # Stop once the slack allowance is exhausted.
                if count >= (n_sample + oversize):
                    running = False
                    break
                # Stop once n_sample is reached at a full-round boundary.
                if count >= n_sample and step > 0 and step % len(sample_map) == 0:
                    running = False
                    break
                if step > 0 and step % len(sample_map) == 0:
                    index += 1
                step += 1 # iteration counter
                if len(value) < index + 1:
                    continue
                if value[index][0] in result: # already sampled, skip
                    continue
                content_index = value[index][0]
                result[content_index] = (category_dict[content_index], value[index][1])  # {index: (categories, score)}
                count += 1 # one more slice sampled
        return result


    async def overall_bubble(self, index_paragraph_sentence_dict: dict, sector: str, industry: str, region: str, user_id: int, file_name: str, fileId: str):
        """Run the whole pipeline for one document and persist every stage.

        :param index_paragraph_sentence_dict: {index: {'文本': ..., '切片': ...}}
        """
        # Recall similar materials next, then run the inquiries.
        db = LawDB()
        await db.init_pool()

        # Register the task row.
        task_id = await db.insert_task(user_id=user_id, sector=sector, industry=industry, region=region, file_name=file_name, fileId=fileId, result=None, error=None, progress=0, status='running', total_inquiry=0, current_inquiry=0, token_usage=None)
        logger.info(f'overall_bubble task_id: {task_id}')
        # Running token-usage counters, shared by every stage.
        token_usage = {'prompt_tokens': 0, 'reasoning_tokens':0, 'completion_tokens':0, 'total_tokens':0}
        key_value_list = list(index_paragraph_sentence_dict.items())
        start_time = time.time()
        tasks = set()
        for index in range(0, len(key_value_list), 16):
            # Score/classify slices in batches of 16.
            batch = key_value_list[index: index + 16]
            batch_dict = dict(batch)
            batch_content_text_dict = OrderedDict()
            for k, v in batch_dict.items():
                batch_content_text_dict[k] = v['文本']
            # Score the batch; records which slices need an inquiry.
            R1_task = asyncio.create_task(self.R1_score(batch_content_text_dict, token_usage))
            tasks.add(R1_task)
            R1_task.add_done_callback(tasks.discard)
        # Collect all the scores.
        results_list = await asyncio.gather(*tasks)
        results_dict = {}
        for result in results_list:
            if isinstance(result, dict):
                results_dict.update(result)

        # Log the tokens spent scoring the prospectus slices.
        logger.info(f'overall_bubble R1_task token_usage: {token_usage}')
        # Persist the token usage.
        await db.update_task_token_usage(task_id, json.dumps(token_usage))

        # Keep only slices scoring 8 or above for deduplication.
        results_dict = {str(k): int(v) for k, v in results_dict.items() if int(v) >= 8} # {index: score}
        logger.info(f'招股说明书问询分数超过8分的切片索引: {results_dict}')
        index_paragraph_sentence_dict_ = { str(k): v for k, v in index_paragraph_sentence_dict.items() if str(k) in results_dict.keys()}
        logger.info(f'招股说明书问询分数超过8分的原始切片:\n{index_paragraph_sentence_dict_}')
        content_text_dict_ = OrderedDict({str(k) : v['文本'] for k, v in index_paragraph_sentence_dict_.items()})
        logger.info(f'招股说明书问询分数超过8分的切片文本：\n{content_text_dict_}')
        # Classify the surviving slices.
        category_dict = await self.classify_content_text(content_text_dict_, token_usage)  # {index: 'c1,c2'}
        # Persist token usage after classification.
        logger.info(f'问询切片内容分类后token_usage: {token_usage}')
        await db.update_task_token_usage(task_id, json.dumps(token_usage))

        logger.info(f'问询切片内容分类结果：\n{category_dict}')
        # Sampling result: sample_result: {index: ('cat1,cat2,cat3', score)}
        sample_result = self.context_sample(category_dict, results_dict, 50)
        logger.info(f'采样后的结果：\n{sample_result}')

        content_text_dict_ = OrderedDict({k: v for k, v in content_text_dict_.items() if k in sample_result})
        content_slice_dict = OrderedDict({k: index_paragraph_sentence_dict[k]['切片'] for k in content_text_dict_.keys()})

        # Update the task status.

        # Progress bookkeeping: total count and finished count.
        progress = {'total_inquiry': len(content_slice_dict), 'have_finished': 0, 'task_id': task_id}
        logger.info(f'progress: {progress}')
        await db.update_task_progress(task_id, progress['total_inquiry'], progress['have_finished'])

        # Recall similar materials.
        # C2_task holds the awaited result of C2_distance (not a Task object).
        C2_task = await self.C2_distance(db, content_text_dict_, sector, industry, region)
        logger.info(f'C2_task: {C2_task}')

        bubbles_generated_tasks = set()
        # Generate the bubbles.
        # NOTE(review): content_slice_dict is built above but content_text_dict_
        # (the raw slice text) is what gets passed on — confirm which is intended.
        ai_reply_task = asyncio.create_task(self.bubbles_generated(sample_result, C2_task, db, user_id, file_name, fileId, content_text_dict_, progress, token_usage))
        bubbles_generated_tasks.add(ai_reply_task)
        ai_reply_task.add_done_callback(bubbles_generated_tasks.discard)
        # Run the generation task(s).
        await asyncio.gather(*bubbles_generated_tasks)
        # Mark the overall progress as complete.
        progress['have_finished'] = progress['total_inquiry']
        logger.info(f'progress: {progress}')
        await db.update_task_progress(task_id, progress['total_inquiry'], progress['have_finished'])

        end_time = time.time()
        logger.info(f'总耗时: {end_time - start_time}')
        # Update the task status.

if __name__ == '__main__':

    # Smoke-test entry point: run the full pipeline on the first 10 slices of
    # a local test document.
    Overall = Overall_Bubble_V2()
    test_doc_path = os.path.join(config.PROJECT_DIR, 'data', 'liqi_test_doc.json')
    with open(test_doc_path, 'r', encoding='utf-8') as f:
        index_paragraph_sentence_dict = json.load(f)
    Sector = '科创板'
    Region = ""
    user_id = 1
    file_name = '理奇股份0916.docx'
    file_id = 'jp_liqi_0916_test'
    Industry = "计算机、通信和其他电子设备制造业"
    # ---- original marker: 屹唐股份 ----
    # Log the start time (dropped the pointless f-prefix on a literal with no
    # placeholders; print() output is unchanged).
    current_time = datetime.now()
    print("总程序开始时间", current_time)

    # Keep only the first 10 slices for a quick test run.
    index_paragraph_sentence_dict = dict(list(index_paragraph_sentence_dict.items())[:10])

    # asyncio.run creates, runs and closes the event loop, replacing the manual
    # new_event_loop/set_event_loop/run_until_complete boilerplate.
    asyncio.run(Overall.overall_bubble(index_paragraph_sentence_dict, Sector, Industry, Region, user_id, file_name, file_id))

    # Log the end time.
    current_time = datetime.now()
    print("总程序结束时间", current_time)