# This module is mainly used to generate inquiry (问询) rules.
import yaml
from utils.utils import dict_to_yaml, yaml_to_dict, write_file, write_dict_to_json
import os
import pandas as pd
import json
from prompts.prompts import (
    CATEGORY_EXTRACTION_PROMPT,
    CATEGORY_EXTRACTION_SYS_PROMPT,
    CATEGORY_MERGE_PROMPT,
    CATEGORY_MERGE_SYS_PROMPT,
    CATEGORY_DROP_DUPLICATE_PROMPT,
    IPO_SYS_PROMPT,
    QUERY_RULE_PROMPT
 )

from service.llm_service import LLM_Service

llm_service = LLM_Service()

class Rules_Service:
    def export_problem_title(self, save_path, problem_df, project_df):
        """
        Export inquiry problem titles grouped by company/round and save as YAML.

        :param save_path: destination YAML path (parent directory is created)
        :param problem_df: DataFrame with project_id, round, problem_order, problem_title
        :param project_df: DataFrame mapping project_id -> company_name
        """
        problem_title_map = {}
        # BUG FIX: sort_values returns a new frame; the original discarded the
        # result, so titles were emitted in raw file order instead of
        # project/round/problem_order.
        sorted_df = problem_df.sort_values(
            by=['project_id', 'round', 'problem_order'],
            ascending=[True, True, True],
        )
        # Cache project_id -> company_name once instead of scanning project_df
        # per row; drop_duplicates keeps the first match, like the original
        # `.to_list()[0]`.
        dedup = project_df.drop_duplicates(subset='project_id')
        company_by_project = dict(zip(dedup['project_id'], dedup['company_name']))
        for row in sorted_df.itertuples(index=False):
            # Group key looks like "<company>_<round>轮".
            group_key = '%s_%s轮' % (company_by_project[row.project_id], str(row.round))
            problem_title_map.setdefault(group_key, []).append(row.problem_title)
        parent_dir = os.path.dirname(save_path)
        if parent_dir and not os.path.exists(parent_dir):
            os.makedirs(parent_dir, exist_ok=True)
        dict_to_yaml(problem_title_map, save_path)

    def problem_filter(self, problem_title_path, save_path):
        """
        Filter out problems whose title is effectively empty (splitting artifacts).

        A title is kept only if, after stripping surrounding single quotes and
        removing the literal text '问题', at least 4 characters remain.

        :param problem_title_path: YAML file produced by export_problem_title
        :param save_path: destination YAML path for the filtered mapping
        """
        data = yaml_to_dict(problem_title_path)
        filtered = {}
        for key, titles in data.items():
            kept = []
            for title in titles:
                title = title.strip("'")
                # Titles that are just "问题" plus a couple of characters are
                # artifacts of a bad document split — drop them.
                if len(title.replace('问题', '')) < 4:
                    continue
                kept.append(title)
            # Keep the group only when at least one real title survived.
            if kept:
                filtered[key] = kept
        dict_to_yaml(filtered, save_path)

    def batch_extract_promble_title(self, problem_path, save_dir, batch_size=50):
        """
        Extract problem categories from historical problem titles with an LLM.

        Titles are grouped per company/round, batched `batch_size` inquiry
        books at a time, and every LLM response is written to
        `<save_dir>/<start>-<end>_category.txt`.

        :param problem_path: YAML of {company_round: [titles]} (see problem_filter)
        :param save_dir: directory receiving the per-batch category files
        :param batch_size: number of inquiry books per LLM call
        """
        problem_title_dict = yaml_to_dict(problem_path)
        os.makedirs(save_dir, exist_ok=True)

        def run_batch(batch, start, end):
            # One LLM round-trip per batch; the response is saved for merging.
            question_content = CATEGORY_EXTRACTION_PROMPT.format(history_question="\n".join(batch))
            print(question_content)
            response = llm_service.call_with_messages(CATEGORY_EXTRACTION_SYS_PROMPT, question_content)
            print(response)
            write_file(os.path.join(save_dir, '%d-%d_category.txt' % (start, end)), response)

        batch = []
        batch_start = 0
        for index, (key, value) in enumerate(problem_title_dict.items()):
            batch.append(key + ":\n" + "\n".join(value))
            if (index + 1) % batch_size == 0:
                run_batch(batch, batch_start, index + 1)
                batch = []
                batch_start = index + 1
                print("----------" * 20)
        # BUG FIX: the original named the trailing-batch file with
        # `index + 1 - batch_size`, which is wrong (even negative) whenever the
        # remainder is smaller than batch_size, and raised NameError for an
        # empty input dict. Tracking batch_start fixes both.
        if batch:
            run_batch(batch, batch_start, batch_start + len(batch))

    def merge_problem_titles(self, folder_dir, batch_size=6):
        """
        Merge the generated category files with an LLM.

        Loads every `*category_merged_3.txt` JSON file found in `folder_dir`,
        feeds their contents to the LLM `batch_size` files at a time, and
        writes each merged response to
        `<folder_dir>/<start>-<end>_category_merged_4.txt`.

        :param folder_dir: folder holding the stage-3 category files
        :param batch_size: number of category files per LLM call
        """
        # (Removed a dead `index = 0` the original never used.)
        prompt_content = []
        for filename in os.listdir(folder_dir):
            if not filename.endswith('category_merged_3.txt'):
                continue
            file_path = os.path.join(folder_dir, filename)
            try:
                with open(file_path, 'r', encoding='utf-8') as file:
                    json_text = json.load(file)
                prompt_content.append(str(json_text))
            except Exception as e:
                # Best-effort: a malformed file is reported and skipped.
                print(f"无法正确加载{file_path}:{e}")
        # Feed the collected category texts to the LLM batch by batch.
        for start_index in range(0, len(prompt_content), batch_size):
            end_index = min(len(prompt_content), start_index + batch_size)
            question_content = CATEGORY_MERGE_PROMPT.format(
                category_and_problem="\n".join(prompt_content[start_index:end_index]))
            print(question_content)
            responses = llm_service.call_with_messages(CATEGORY_MERGE_SYS_PROMPT, question_content)
            print(responses)
            write_file(os.path.join(folder_dir, str(start_index) + "-" + str(end_index) + '_category_merged_4.txt'), responses)



    def count_problem_titles(self, file_path):
        """
        Count the keys and total values of the YAML mapping at `file_path`.

        Prints a per-key value count, then returns (key_count, total_values),
        or None when the file cannot be loaded.
        """
        try:
            data = yaml_to_dict(file_path)
        # BUG FIX: the file is YAML, so a parse failure surfaces as
        # yaml.YAMLError — the original only caught json.JSONDecodeError,
        # which yaml_to_dict never raises. JSONDecodeError is kept for
        # backward compatibility.
        except (yaml.YAMLError, json.JSONDecodeError, FileNotFoundError) as e:
            print(f'无法正确加载{file_path}:{e}')
            return None
        for key, values in data.items():
            # Show how many rules/titles each key holds.
            print("key: %s, len(rules): %s" % (key, str(len(values))))
        key_count = len(data)
        total_values = sum(len(v) for v in data.values())
        return key_count, total_values

    def count_dict_keys_and_values(self, file_path):
        """
        统计dict对象所有key和values的数量
        """
        try:
            res = dict
            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read()
                try:
                    data = json.loads(content)
                    if isinstance(data, dict):
                        key_count = len(data.keys())
                        total_values = sum([len(value) for value in data.values()])
                        #  每个key对应的values数量
                        for key in data:
                            print("key: %s, len(rules): %s" % (key, str(len(data[key]))))
                            res[key] = len(data[key])
                        res["key_count"] = key_count
                        res["total_values"] = total_values
                        return res
                    return None
                except json.JSONDecodeError:
                    try:
                        data = yaml.safe_load(content)
                        if isinstance(data, dict):
                            key_count = len(data.keys())
                            total_values = sum([len(value) for value in data.values()])
                            for key in data:
                                print("key: %s, len(rules): %s" % (key, str(len(data[key]))))
                                res[key] = len(data[key])
                            res["key_count"] = key_count
                            res["total_values"] = total_values
                            return res
                        return None
                    except yaml.YAMLError:
                        return None
        except (json.JSONDecodeError, FileNotFoundError) as e:
            print(f'无法正确加载{file_path}:{e}')
            return None



    def drop_duplicate_category(self, file_path, save_dir):
        """
        Deduplicate the category list via the LLM.

        Loads the JSON categories at `file_path`, asks the model to remove
        duplicates, and writes its answer to
        `<save_dir>/category_drop_duplicate.txt`. Load errors are reported
        and swallowed (best-effort stage of the pipeline).
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as handle:
                categories = json.load(handle)
            prompt = CATEGORY_DROP_DUPLICATE_PROMPT.format(category_and_problem=categories)
            print(prompt)
            answer = llm_service.call_with_messages(IPO_SYS_PROMPT, prompt)
            print(answer)
            write_file(os.path.join(save_dir, 'category_drop_duplicate.txt'), answer)
        except (json.JSONDecodeError, FileNotFoundError) as e:
            print(f'无法正确加载{file_path}:{e}')

    def format_json_file(self, path):
        """
        Pretty-print the JSON file at `path` into a sibling file.

        The output is written next to the input as `<stem>1.txt`, with
        4-space indentation and non-ASCII characters preserved.
        """
        try:
            with open(path, 'r', encoding='utf-8') as file:
                data = json.load(file)
                print(type(data))
                print("data:", data)
            # BUG FIX: `path.split('.')[0]` truncated at the FIRST dot
            # anywhere in the path — including dots in directory names —
            # producing a bogus output location. splitext strips only the
            # final extension.
            out_path = os.path.splitext(path)[0] + '1.txt'
            write_file(out_path, json.dumps(data, indent=4, ensure_ascii=False))
        except (json.JSONDecodeError, FileNotFoundError) as e:
            print(f'无法正确加载{path}:{e}')

    def generate_query_rules(self, category_content, problem_df, query_df, material_df, project_df, save_dir):
        """
        Generate inquiry rules with an LLM from filing materials and historical questions.

        For every problem that has both query and material rows, builds a
        prompt from the material text, the problem title + query text, and the
        category reference, then writes the LLM response to
        `<save_dir>/<company>_<round>轮_<title>.json`.

        :param category_content: category taxonomy injected into the prompt
        :param problem_df: one row per inquiry problem
        :param query_df: per-problem query rows, ordered by query_order
        :param material_df: per-problem material rows, ordered by material_type
        :param project_df: project_id -> company_name mapping
        :param save_dir: output directory (created if missing)
        """
        # Hoist the membership tests out of the loop: `in df[...].to_list()`
        # rebuilt a list and scanned it (O(n)) for every row.
        query_ids = set(query_df['problem_id'])
        material_ids = set(material_df['problem_id'])
        os.makedirs(save_dir, exist_ok=True)
        for index in range(problem_df.shape[0]):
            row = problem_df.iloc[index, :]
            problem_id = row['problem_id']
            project_id = row['project_id']
            round_no = row['round']  # renamed from `round` to stop shadowing the builtin
            problem_title = row['problem_title']
            # Skip problems lacking either queries or materials.
            if problem_id not in query_ids or problem_id not in material_ids:
                continue
            company_name = project_df[project_df['project_id'] == project_id]['company_name'].to_list()[0]

            query_df_ = query_df[query_df['problem_id'] == problem_id].sort_values(by='query_order', ascending=True)
            query_content = '\n'.join(query_df_['query_content'].to_list())

            material_df_ = material_df[material_df['problem_id'] == problem_id].sort_values(by='material_type', ascending=True)
            material_content = '\n'.join(material_df_['material_content'].to_list())

            question_content = QUERY_RULE_PROMPT.format(material=material_content,
                                                        question=problem_title + '\n' + query_content,
                                                        ref_category=category_content)
            print(question_content)
            response = llm_service.call_with_messages(IPO_SYS_PROMPT, question_content)
            print(response)
            # NOTE(review): problem_title may contain characters invalid in
            # file names (e.g. '/', ':'); confirm upstream data or sanitize.
            save_file = '%s_%s_%s.json' % (str(company_name), str(round_no) + '轮', problem_title)
            write_file(os.path.join(save_dir, save_file), response)
            print('----------' * 10)



    def merge_query_rules(self, category_file, rules_folder):
        """
        Merge the per-problem rule files into consolidated rule dictionaries.

        Writes two files into `rules_folder`:
        - rules_query_merged.json: flat {category: [rules]} merged across all
          per-problem .json files in the folder
        - rules_query_merged_two_level.json: the same rules re-grouped under
          the two-level category hierarchy loaded from `category_file`

        NOTE(review): the outputs also end in .json and land in the scanned
        folder, so a re-run would fold the previous merge back in — confirm
        this is intended before re-running in place.
        """
        merged = {}
        with open(category_file, 'r', encoding='utf-8') as file:
            category_content = json.load(file)

        for filename in os.listdir(rules_folder):
            if not filename.endswith('.json'):
                continue
            file_path = os.path.join(rules_folder, filename)
            try:
                with open(file_path, 'r', encoding='utf-8') as file:
                    rules_json = json.load(file)
            except (json.JSONDecodeError, FileNotFoundError) as e:
                print(f"无法正确加载{file_path}:{e}")
                continue
            if not isinstance(rules_json, dict):
                continue
            for key, rules in rules_json.items():
                # BUG FIX: the original stored the source list by reference on
                # first sight, so later extend() calls mutated the loaded
                # rules_json too; setdefault with a fresh list avoids aliasing.
                merged.setdefault(key, []).extend(rules)
        # Flat (category -> rules) merge.
        write_file(os.path.join(rules_folder, 'rules_query_merged.json'),
                   json.dumps(merged, indent=4, ensure_ascii=False))
        # Nested (level-1 -> level-2 -> rules) view driven by the taxonomy;
        # categories with no generated rules get an empty list.
        nested = {}
        for one_level_key, two_level_keys in category_content.items():
            nested[one_level_key] = {two: merged.get(two, []) for two in two_level_keys}
        write_file(os.path.join(rules_folder, 'rules_query_merged_two_level.json'),
                   json.dumps(nested, indent=4, ensure_ascii=False))


if __name__ == '__main__':
    # Ad-hoc driver for the rule-generation pipeline. Each commented-out call
    # below is one pipeline stage; stages were run one at a time by
    # uncommenting them, with each stage's output feeding the next.
    # NOTE(review): all paths are machine-specific Windows locations — move
    # them to configuration before reuse.

    project_dir = r'D:\workspace\ipo2'
    data_dir = r'original_data'

    # Raw CSV exports consumed by the pipeline.
    material_file = 'material.csv'
    problem_file = 'problem.csv'
    query_file = 'query.csv'
    project_file = 'project.csv'
    category_file = 'category.txt'
    material_df = pd.read_csv(os.path.join(project_dir, data_dir, material_file))
    problem_df = pd.read_csv(os.path.join(project_dir, data_dir, problem_file))
    query_df = pd.read_csv(os.path.join(project_dir, data_dir, query_file))
    project_df = pd.read_csv(os.path.join(project_dir, data_dir, project_file))

    rules_service = Rules_Service()

    save_dir = os.path.join(project_dir, 'produce_data', 'rules_query')
    save_name = 'all_problem_titles.yaml'
    save_path = os.path.join(save_dir, save_name)
    # Stage 1: dump all problem titles grouped by company/round to YAML.
    # rules_service.export_problem_title(save_path, problem_df, project_df)
    filter_save_path = os.path.join(save_dir, 'problem_title_filter.yaml')
    # Stage 2: drop titles that are empty artifacts of a bad document split.
    # rules_service.problem_filter(save_path, filter_save_path)
    # Stage 2b (diagnostics): count keys/values of the filtered titles.
    # rules_service.count_problem_titles(filter_save_path)
    # keys_count, total_values = rules_service.count_problem_titles(filter_save_path)
    # print(f'keys_count: {keys_count}, total_values_count: {total_values}')
    # Stage 3: extract categories from the titles in LLM batches.
    # rules_service.batch_extract_promble_title(filter_save_path, save_dir=r"D:\\workspace\\ipo2\\produce_data\\rules_query\\category\\")
    # Stage 4: merge the per-batch category files.
    # rules_service.merge_problem_titles(r"D:\\workspace\ipo2\produce_data\rules_query\category", batch_size=2)
    # Stage 5: LLM-deduplicate the merged categories, then pretty-print.
    # rules_service.drop_duplicate_category(r"D:\workspace\ipo2\produce_data\rules_query\category\merged_save\0-2_category_merged_4.txt", r"D:\workspace\ipo2\produce_data\rules_query\category\merged_save")
    # rules_service.format_json_file(r"D:\workspace\ipo2\produce_data\rules_query\category\merged_save\deepseek_category_drop_duplicates.txt")
    # Final category taxonomy used as the reference for rule generation.
    category_path = r"D:\\PyWorkspace\\IPO2\\produce_data\\rules_query\\category\\merged_save\\deepseek_category_final_version.txt"
    # category_path = r"D:\\workspace\\ipo2\\produce_data\\rules_query\\category\\merged_save\\deepseek_category_final_version.txt"
    with open(category_path, 'r', encoding='utf-8') as file:
        category_content = json.loads(file.read())
        # print(category_content)
    # Stage 6: generate per-problem query rules with the LLM.
    # rules_save_dir = r"D:\\workspace\\ipo2\\produce_data\\rules_query\\rules\\版本2"
    # rules_service.generate_query_rules(category_content, problem_df, query_df, material_df, project_df, save_dir=rules_save_dir)
