import ast
import json
import re
import sys

import pandas as pd
from html2text import html2text

sys.path.append('/workspace/qanything_local')
from qanything_kernel.dependent_server.llm_api_serve.glm4_serve import ZhipuAILLM


def process_content(content, llm, filename, file_id=None):
    """Generate up to 10 QA pairs from *content* via glm-4.

    Args:
        content: Source text (a regulation / knowledge-base document).
        llm: ZhipuAILLM wrapper; its ``temperature`` is set to 0.2 here.
        filename: Source name recorded on every QA pair.
        file_id: Optional knowledge-base id recorded on every QA pair.

    Returns:
        A list of QA dicts (empty on any failure — generation is best-effort).
    """
    try:
        llm.temperature = 0.2
        prompt = f"""根据以下参考信息中的内容部分，生成不超过10个问答对。需要遵守以下几点要求：
        # 生成的问题以第一人称口吻，尽量偏口语化，意思简单、直接明了；
        # 生成的答案开头要有依据的是哪个法规或者规定第几章第几条；
        # 答案内容不需要总结，直接从内容部分截取出来。

        参考信息如下:
        {content}
        # 回答格式要求:
        将`问题`和`答案`放到json中，最后放到列表中输出，其他额外信息不要输出。"""

        response = llm.chat(prompt=prompt, model_name='glm-4')
        qa_list = extract_qa_from_response(llm, response, content, filename, file_id)
    except Exception as e:
        # Best-effort: keep the batch running, but leave a trace instead of
        # swallowing the error silently.
        print(f"process_content failed for {filename!r}: {e}")
        qa_list = []
    return qa_list

def _gen_sim_questions(llm, sim_prompt, few_shot, query):
    """Ask glm-3-turbo to rewrite *query* into similar questions.

    Args:
        llm: ZhipuAILLM wrapper exposing an OpenAI-style ``client``.
        sim_prompt: System prompt describing the rewrite task.
        few_shot: Alternating user/assistant example messages.
        query: The question to rewrite.

    Returns:
        The parsed list of rewrites, or ``None`` when the model reply
        cannot be parsed as a Python list literal.
    """
    messages = [{"role": "system", "content": sim_prompt}]
    messages.extend(few_shot)
    messages.append({"role": "user", "content": query})

    completion = llm.client.chat.completions.create(
        model='glm-3-turbo',
        messages=messages,
        stream=False,
        max_tokens=2048,
        temperature=0.2,
        top_p=0.9,
    )
    reply = completion.choices[0].message.content if completion.choices else ""
    try:
        # literal_eval instead of eval: the reply is untrusted model output.
        return ast.literal_eval(reply)
    except Exception:
        return None


def extract_qa_from_response(llm, response, content, filename, file_id=None):
    """Parse QA pairs out of the glm-4 *response* and enrich each pair with
    similar questions, the source filename, the optional file id and a
    running sequence number.

    Relies on the module-level ``num`` counter (shared across calls) for
    the '序号' field.

    Returns:
        The list of enriched QA dicts (may be empty).
    """
    global num

    sim_prompt = """现在你是一名问句改写专家，根据提供的问句，必须在不改变句子意思的前提下，帮我改写成若干相似问句。需要遵守以下要求：
    # 1. 改写的问句放到python列表中输出，格式为`['改写问句1',...]`，其他额外信息不要输出；
    # 2. 改写的问句主要意思不能发生变化；
    # 3. 改写的问句尽量偏口语化，简短，意思简单、直接明了。"""

    # Few-shot examples demonstrating the expected rewrite output format.
    few_shot = [
        {
            "role": "user",
            "content": "对涉嫌传销的行为，工商行政管理部门可以采取哪些措施？"
        },
        {
            "role": "assistant",
            "content": """["怎么处理传销？","怎么处理涉嫌传销的行为","遇到传销了，相关部门能起到什么作用？","你知道，相关部门有什么办法能处理传销行为？"]"""
        },
        {
            "role": "user",
            "content": "哪些行为属于有奖销售？"
        },
        {
            "role": "assistant",
            "content": """["什么叫有奖销售？","怎么理解有奖销售？","你知道什么是有奖销售吗？","我碰到的这个是有奖销售吗？"]"""
        },
    ]

    try:
        # Happy path: the model answered inside a ```json fenced block that
        # is itself a valid Python/JSON list literal.
        qa_list = ast.literal_eval(response.split('```json\n')[-1].split('\n```')[0])

        for qa in qa_list:
            questions = _gen_sim_questions(llm, sim_prompt, few_shot, qa.get('问题'))
            if questions is None:
                # Keep the QA pair but without similar questions / metadata,
                # matching the original ``continue`` behaviour.
                continue
            qa['相似问'] = questions
            if file_id:
                qa['文件ID'] = file_id
            qa['出处'] = filename
            qa['序号'] = num
            num += 1
    except Exception:
        # Fallback: scrape every flat {...} fragment from the raw response
        # and parse it as JSON.  Using the helper above also fixes a bug in
        # the previous version, which rebound ``response`` to a completions
        # object inside the loop and then fed it to re.findall here.
        qa_list = []
        for fragment in re.findall(r'\{([^{}]*)\}', response):
            try:
                dic_qa = json.loads('{' + fragment + '}')
            except json.JSONDecodeError:
                # Skip malformed fragments instead of aborting the whole batch.
                continue

            # The fallback path intentionally uses only the first example pair,
            # as in the original implementation.
            questions = _gen_sim_questions(llm, sim_prompt, few_shot[:2], dic_qa.get('问题'))
            if questions is None:
                continue
            dic_qa['相似问'] = questions
            if file_id:
                dic_qa['文件ID'] = file_id
            dic_qa['出处'] = filename
            dic_qa['序号'] = num
            num += 1
            qa_list.append(dic_qa)
    return qa_list



def main():
    """Walk the 12315 knowledge-base directory, generate QA pairs for every
    file with :func:`process_content`, and dump them all as one JSON list."""
    llm = ZhipuAILLM()
    llm.max_token = 2048

    def qa_12315():
        # Collect every file under the knowledge-base directory, generate
        # QA pairs per file, then write the combined result once.
        import os
        directory = '/workspace/qanything_local/qanything_kernel/row_data/12315知识库/zsk-0528'
        out_path = '/workspace/qanything_local/qanything_kernel/row_data/12315知识库/gen_qa-0524/12315_qa_pair-0528.json'

        file_entries = []
        for root, dirs, files in os.walk(directory):
            for file in files:
                file_entries.append({
                    "path": os.path.join(root, file),
                    "filename": file
                })

        all_qa = []
        for entry in file_entries:
            with open(entry['path'], 'r', encoding='utf-8') as f:
                raw = f.read()
            # Collapse blank lines, then replace runs of full-width spaces.
            normalized = re.sub(r'\n+', '\n', raw)
            cleaned = re.sub(r'\u3000+', ' ', normalized)
            all_qa.extend(process_content(cleaned, llm, entry['filename']))

        # Open the output only when there is something to write, instead of
        # holding the handle open for the whole (long) generation run.
        with open(out_path, 'w', encoding='utf-8') as json_file:
            json.dump(all_qa, json_file, ensure_ascii=False, indent=4)

    qa_12315()


if __name__ == "__main__":
    # Module-level counter consumed (via ``global num``) by
    # extract_qa_from_response to assign the '序号' field.
    num = 1

    # main()  # uncomment to regenerate the QA-pair JSON from the raw files

    # Inspect the previously exported QA CSV.
    csv_file_path = '/workspace/qanything_local/qanything_kernel/row_data/12315知识库/gen_qa-0524/12315_zsk-0530.csv'
    df = pd.read_csv(csv_file_path, encoding='utf-8')
    print(df)
