# -*- coding:utf-8 -*-
import csv
import json

# @Time    : 2023/5/13 02:24
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : user_info_extract.py
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from agent.llm_agent import LLMAgent
import traceback
from common.log import logger
import os
import pandas as pd
import docx
import textract

ROOT_PATH = os.path.dirname(os.path.realpath(__file__))

# Pipeline overview:
# 1. Material acquisition
# 2. Knowledge extraction
# 3. Knowledge classification
# 4. Knowledge curation (?)
# 5. Knowledge ingestion into the knowledge base

# Material types (currently unused; the handled extensions are hard-coded below)
# MATERIAL_TYPE = {
#     'document':['.doc','.docx','.pdf'],
#     'picture':['.jpg','.png','.gif'],
#     'audio':['.mp3','.ogg']
# }

# Prompt template sent to the LLM: asks it to restructure a document chunk
# into Q&A knowledge entries, each tagged with exactly one of four types
# (专业知识 / 用户问题 / 用户疑义 / 产品知识), emitting one JSON object per line:
#   {"TYPE": "...", "Q": "...", "A": "..."}
# `{document}` is substituted via str.format in Document2Knowledge.__init__,
# which is why the braces in the JSON example are doubled.
default_template = """
帮我按照知识库的形式整理成问答格式,同时区分问题类型是属于专业知识、用户问题、用户疑义、产品知识中的哪一种， 有且只有这4种类型，并分别用TYPE:，Q:和A:区分
输出结果是json，格式是:{{"TYPE":"","Q":"","A":""}}
===
{document}
===
        """

class MaterialManagement():
    """Step 1 of the pipeline: collect raw material text from local files."""

    # Maximum characters per returned chunk, so each chunk fits into one
    # LLM prompt.
    CHUNK_SIZE = 2048

    def load_local_file(self, filePath: str):
        """Extract plain text from one document and split it into chunks.

        Uses textract to pull text out of the file, drops empty paragraphs
        and paragraphs containing the transcript speaker marker '发言人1',
        then re-joins the remainder and slices it into CHUNK_SIZE pieces.

        Args:
            filePath: path of the document to load.

        Returns:
            list[str]: text chunks of at most CHUNK_SIZE characters each;
            an empty list if extraction fails.
        """
        chunks = []
        try:
            text = str(textract.process(filePath), 'utf-8')

            # Keep only meaningful paragraphs; '发言人1' lines are speaker
            # labels from meeting transcripts, not knowledge content.
            paragraphs = [p for p in text.split('\n\n')
                          if p and '发言人1' not in p]
            textStr = ''.join(paragraphs)

            # BUG FIX: the original appended the chunks onto the raw
            # paragraph list (which still contained the unfiltered
            # paragraphs) and returned both mixed together. Return only
            # the fixed-size chunks.
            for start in range(0, len(textStr), self.CHUNK_SIZE):
                chunks.append(textStr[start:start + self.CHUNK_SIZE])
            return chunks
        except Exception:
            # Best effort: one unreadable file must not abort the whole
            # directory walk, but record why it failed instead of
            # swallowing the error silently.
            logger.error(traceback.format_exc())
            return chunks

    def materials_obtaining_from_local(self, localPath: str):
        """Gather material from a local path.

        A directory is walked recursively and every Word document
        (.doc/.docx) is loaded and chunked via load_local_file; any other
        path is treated as a CSV file of existing records.

        Args:
            localPath: directory or CSV file path.

        Returns:
            list: text chunks (directory case) or row dicts (CSV case);
            empty list if the path does not exist.
        """
        matContent = []
        if not os.path.exists(localPath):
            return matContent

        if os.path.isdir(localPath):
            for root, dirs, files in os.walk(localPath):
                for file in files:
                    # if os.path.splitext(file)[-1] in MATERIAL_TYPE.get(matType):
                    if os.path.splitext(file)[-1] in ('.doc', '.docx'):
                        faqDictList = self.load_local_file(os.path.join(root, file))
                        if faqDictList:
                            logger.info(f'{file} 加载成功,')
                            matContent.extend(faqDictList)
        else:
            # Single file: assumed to be a CSV of already-structured records.
            df = pd.read_csv(localPath)
            faqDictList = df.to_dict('records')
            logger.info(f'{localPath}加载成功, ')
            matContent.extend(faqDictList)
        return matContent


class Document2Knowledge(LLMAgent):
    """LLM agent that turns a document chunk into typed Q&A knowledge entries."""

    def __init__(self, document):
        """Build the extraction prompt from the document and init the agent.

        Args:
            document: raw text chunk to be turned into Q&A knowledge.
        """
        self.prompt = default_template.format(document=document)
        super().__init__(self.prompt)

    def read_csv(self):
        """TODO: read knowledge back from CSV (not implemented)."""
        pass

    def download_origin_file(self):
        """TODO: download the original source file (not implemented)."""
        pass

    def judge_knowledge_already_have(self):
        """TODO: de-duplicate against existing knowledge (not implemented)."""
        pass

    def parse_knowledge(self, knowledge: str, saveAsHistory=True):
        """Parse the LLM's raw answer into a list of Q&A dicts.

        Each non-empty line is expected to be one JSON object of the form
        {"TYPE": ..., "Q": ..., "A": ...} (see default_template).
        Unparseable lines are logged and skipped instead of aborting.

        Args:
            knowledge: raw text returned by the model.
            saveAsHistory: when True, append the parsed list as one JSON
                line to q_and_a_history.json.

        Returns:
            list[dict]: the successfully parsed entries (possibly empty).
        """
        import ast  # local: only needed for the fallback parser below

        q_and_a_list = []
        for line in knowledge.split('\n'):
            if not line:
                continue
            # SECURITY FIX: the original ran eval() on model output, which
            # executes arbitrary code. Parse as JSON, falling back to a
            # safe Python-literal parse for single-quoted dicts.
            try:
                q_and_a_list.append(json.loads(line))
            except ValueError:
                try:
                    q_and_a_list.append(ast.literal_eval(line))
                except (ValueError, SyntaxError):
                    logger.warning('无法解析的知识行: %s', line)

        try:
            if saveAsHistory and q_and_a_list:
                # utf-8 made explicit so Chinese text is written portably.
                with open("q_and_a_history.json", "a", encoding="utf-8") as f:
                    json.dump(q_and_a_list, f, ensure_ascii=False)
                    f.write("\n")
        except Exception:
            # History is best-effort; log instead of silently swallowing.
            logger.error(traceback.format_exc())

        return q_and_a_list

    def write_knowledge_to_csv(self, targetKnowledgePath: str, knowledges: list):
        """Append knowledge entries to per-type CSV files.

        Entries are grouped by TYPE into files named 规划师_<TYPE>.csv under
        targetKnowledgePath; a header row is written when a file is first
        created.

        Args:
            targetKnowledgePath: directory holding the per-type CSV files.
            knowledges: list of {"TYPE", "Q", "A"} dicts.
        """
        try:
            # BUG FIX: default '' (not 0) so a missing TYPE cannot raise
            # TypeError when compared against the other string keys.
            knowledges.sort(key=lambda k: k.get('TYPE', ''))

            loadCount = {}
            for kDict in knowledges:
                q = kDict.get('Q')
                a = kDict.get('A')
                kType = kDict.get('TYPE')  # renamed: was shadowing builtin type()

                filePath = os.path.join(targetKnowledgePath,
                                        '规划师_{}.csv'.format(kType))
                isNewFile = not os.path.exists(filePath)

                # One open per row (the original opened twice and also
                # called f.close() redundantly after the with-block);
                # utf-8 made explicit so the Chinese text is portable.
                with open(filePath, "a", newline="", encoding="utf-8") as f:
                    w = csv.writer(f, delimiter="\t")
                    if isNewFile:
                        w.writerow(['知识点', ',', '知识内容'])
                    w.writerow([q, ',', a])

                loadCount[kType] = loadCount.get(kType, 0) + 1
            logger.info('写入知识库 {} 知识'.format(loadCount))
        except Exception:
            logger.error(traceback.format_exc())

if __name__ == '__main__':
    #     document_knowledge = Document2Knowledge(
    #         '''关于保险的商业，上一篇我们讲了保险返本，特别是两全险的返本是怎么回事。咱们今天继续讲返本这个话题，这一篇我主要是讲以下四点。第一个，为什么保险是不能快速返本的？第二个，带身故责任的重疾险是怎我才会返本。第三点，纯重疾险到底是算消费险还是算能返本的保险？第四点，返本的本质到底是什么？
    # 先讲第一点，关于保险为什么不能快速返本，上面有讲到从金融的收益性、安全性和流动性这三个衡量指标来定位。保险是典型的牺牲流动性换取国家级的安全性，提供保守固定收益的金融产品。早期不是没有能快速返本的保险，相反当年银保渠道为主的时候，快速返本的保险是非常多的，但后来都被监管给毙掉了。
    # 监管给保险明确的要求就是不能长险短做，五年是一个界限，每年监管通报的问题清单中，几乎一直都有提这个。所以需要明白是监管限制保险公司，不允许保险公司通过给出过高的一个早期现金价值，来变相的把保险变成短期理财去卖。因为保险公司大部分的利润空间，都是来源于拿保费去做长期投资赚取回来的利差，只有投资周期拉长，才能够比较好的去抚平短期的投资风险，确保保险公司正常经营。如果保险公司可以很短的时间内，但现金价值就超过保费，固然是对我们消费者而言是好让保险有了更好的流动性，但对保险公司，这就是更大的一个经营挑战。保险公司为了能够赚取利润，势必会使用更激进的投策略。而保保险干净对付付的特点，保险公司这么做相当于把风险转移给了国家。所以监管的限制和各种通报就是再三警告保险时不要去为了一时的业绩埋下未来经营的风险，不要目光短视。''')

    # Step 1: material extraction from the local corpus.
    mm = MaterialManagement()
    docs = mm.materials_obtaining_from_local('/Users/yuwf/Downloads/保险知识的学习')

    # Steps 2-3: knowledge extraction / typing via the LLM, per chunk.
    # 1-based index so the progress log actually reaches len(docs)/len(docs)
    # (the original logged 0-based and never showed completion).
    for index, doc in enumerate(docs, 1):
        q_and_a_list = []
        logger.info('素材加工:{}/{}'.format(index, len(docs)))
        document_knowledge = Document2Knowledge(f'''{doc}''')

        result = document_knowledge.chat_with_azure()
        if result:
            # Renamed from `list`, which shadowed the builtin.
            parsed = document_knowledge.parse_knowledge(result)
            if parsed:
                q_and_a_list.extend(parsed)
        # Step 5: write the per-type knowledge-base CSV files.
        document_knowledge.write_knowledge_to_csv('/Users/yuwf/Downloads/model/GPT/LLM_master/LLM_internal/internal_server/knowledge_base/raw_data/insurance_planner_gpt',q_and_a_list)

    # Knowledge-base vectorization (disabled).
    # basePath = os.path.dirname(os.path.dirname(__file__))
    # from knowledge_base.knowledge_embedding import AZUREQuery
    # azureQuery = AZUREQuery(os.path.join(basePath,"knowledge_base/embedding_base/insurance_planner_gpt"))
    # azureQuery.save_from_csv([os.path.join(basePath,'knowledge_base/raw_data/insurance_planner_gpt/')])

    # Upload of the knowledge-base files to the server (disabled).
    '''
    from knowledge_base.transport_utils import FtpUtil
    ftpUtil = FtpUtil()
    ftpUtil.pushFile(['raw_data/insurance_planner_gpt/规划师_用户疑义.csv',
                      'raw_data/insurance_planner_gpt/规划师_用户问题.csv',
                      'raw_data/insurance_planner_gpt/规划师_专业知识.csv',
                      'raw_data/insurance_planner_gpt/规划师_产品知识.csv'])

    ftpUtil.pushFile(['embedding_base/insurance_planner_gpt/index.pkl',
                      'embedding_base/insurance_planner_gpt/index.faiss'])
    '''
