# -*- coding:utf-8 -*-

# @Time  : 2024/2/4 8:19 PM
# @Author: chenyong
# @Email : chenyong@lingxi.ai
# @File  : user_info_util.py
import os

import numpy as np
import json
import sys
import re

# sys.path.append('./')
# sys.path.append('../../../')
curPath = os.path.abspath(os.path.dirname(__file__))
from knowledge_base.local_embedding import LocalEmbeddings
from langchain import FAISS
import pandas as pd
from langchain.schema import Document
import faiss
import pickle
import asyncio
from common.log import logger
import traceback


class ProfessCate:
    """Maps free-text profession names to categories via embedding similarity.

    Loads a "profession:category" dictionary from a text file and pre-computes
    embeddings for every known profession so each query reduces to one
    embedding call plus a dot product against the cached matrix.
    """

    def __init__(self):
        self.embedding = LocalEmbeddings()
        self.profess2cate = self.get_profession(curPath + "/../doc/profession.txt")
        self.get_profess_embedding()

    def get_profession(self, file_path):
        """Parse a "profession:category" file into a dict.

        Lines that do not split into exactly two ':'-separated parts are
        skipped.

        Args:
            file_path: path to the profession dictionary text file.

        Returns:
            dict mapping profession name -> category name.
        """
        profess2cate = {}
        # Explicit UTF-8 so the Chinese dictionary file parses regardless of
        # the platform's default locale encoding.
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split(':')
                if len(parts) != 2:
                    continue
                profess2cate[parts[0]] = parts[1]
        return profess2cate

    def get_profess_embedding(self):
        """Pre-compute and cache embeddings for all known professions."""
        if len(self.profess2cate) < 1:
            return ''
        self.professions = list(self.profess2cate.keys())
        profess_embeddings = self.embedding.embed_documents(self.professions)
        self.profess_embeddings = np.array(profess_embeddings)

    def get_profess_cate(self, profess_query, score=0.5):
        """Return the category of the known profession closest to the query.

        Args:
            profess_query: free-text profession name; non-str input yields ''.
            score: minimum similarity (dot product) required for a match.

        Returns:
            The matched category string, or '' when the input is not a string
            or no known profession is similar enough.
        """
        if not isinstance(profess_query, str):
            return ''
        key_embedding = np.array(self.embedding.embed_query(profess_query))
        scores = key_embedding @ self.profess_embeddings.T
        best = int(np.argmax(scores))
        if scores[best] > score:
            # (Removed leftover debug print of the matched profession.)
            return self.profess2cate[self.professions[best]]
        return ''




class City:
    """Helpers for normalizing Chinese region strings to city/province names."""

    def __init__(self):
        # city name -> province name, populated by get_city()
        self.city2province = {}

    def extract_city(self, region):
        """Extract the city name from a region string.

        Args:
            region: region name, e.g. "xx省yy市" or "yy市".

        Returns:
            The city name without the trailing "市" when one is found,
            otherwise '' (note: the empty string, not a placeholder name).
        """
        match = re.search(r'(?:.*省)?(.+?)市', region)
        return match.group(1) if match else ''

    def get_city(self):
        """Load the city -> province mapping from the bundled city_object.json."""
        # Explicit UTF-8 so the Chinese JSON parses on any platform default.
        with open(curPath + "/../doc/city_object.json", 'r', encoding='utf-8') as f:
            city_object = json.load(f)
        for code, city in city_object.items():
            name = city["name"]
            # "市辖区" is a municipal-district placeholder, not a real city name.
            if name == '市辖区':
                continue
            self.city2province[name] = city["province"]

    def get_region(self, city_query):
        # Not implemented yet; reserved for reverse lookup (city -> region).
        pass


class CandidateProducts:
    """Embedding-based fuzzy matching of a query against known product names."""

    def __init__(self, embeddings, products_name, products_config):
        self.embeddings = embeddings
        self.products_name = products_name
        self.products_config = products_config
        self.get_products_embedding()

    def get_products_embedding(self):
        """Pre-compute and cache embeddings for all product names."""
        products_embeddings = self.embeddings.embed_documents(self.products_name)
        self.products_embeddings = np.array(products_embeddings)

    def get_condidate_products(self, target_product_query, limit_score=0.55):
        """Return formatted descriptions of all products similar to the query.

        Args:
            target_product_query: free-text product name; non-str yields None.
            limit_score: minimum similarity for a product to be included.

        Returns:
            A (possibly empty) list of
            "name（保险产品名称）（保险类型：…，保险产品类型：…）" strings, or
            None for non-string input.
        """
        if not isinstance(target_product_query, str):
            return None
        key_embedding = np.array(self.embeddings.embed_query(target_product_query))
        scores = (key_embedding @ self.products_embeddings.T).tolist()
        res = []
        for product_name, sim in zip(self.products_name, scores):
            if sim <= limit_score or product_name not in self.products_config:
                continue
            config = self.products_config[product_name]
            product_type = config['保险类型']
            product_name_type = config['保险产品类型']
            res.append(
                product_name + '（保险产品名称）' + '（保险类型：' + product_type + '，保险产品类型：' + product_name_type + '）')
        return res

    def get_sim_product(self, target_product_query, limit_score=0.7):
        """Return the single most similar product name, or None.

        A product qualifies only when its similarity exceeds limit_score;
        among qualifiers, the highest-scoring name wins.
        """
        if not isinstance(target_product_query, str):
            return None
        key_embedding = np.array(self.embeddings.embed_query(target_product_query))
        scores = (key_embedding @ self.products_embeddings.T).tolist()
        max_score = 0.0
        product_name = None
        for name, sim in zip(self.products_name, scores):
            if sim > limit_score and sim > max_score:
                product_name = name
                max_score = sim
        return product_name

class WechatKnowledge:
    """FAISS-backed retrieval over CSV knowledge-base documents.

    CSV rows under knowledge_base/raw_data are embedded and persisted as a
    FAISS index (index.faiss) plus a pickled list of row dicts (doc.pkl)
    under knowledge_base/embedding_base; queries are answered by L2
    nearest-neighbour search over that index.
    """

    def __init__(self):
        self.embeddings = LocalEmbeddings()
        self.folder_path = curPath + "/../knowledge_base/"
        self.doc_path = os.path.join(self.folder_path, "raw_data/")
        self.embedding_path = os.path.join(self.folder_path, "embedding_base/")
        # makedirs + exist_ok: safe if parents are missing or the directory
        # was created concurrently (os.mkdir raised in both cases).
        os.makedirs(self.embedding_path, exist_ok=True)
        # self.get_knowledge()
        self.doc_data_path = os.path.join(self.embedding_path, "doc.pkl")
        self.doc_index_path = os.path.join(self.embedding_path, "index.faiss")

        # Always rebuild on startup so CSV edits are picked up; the
        # existence check was deliberately disabled:
        # if not os.path.exists(self.doc_data_path) or not os.path.exists(self.doc_index_path):
        self.save_knowledge_embedding()

        self.load_knowledge_embedding()

    def _read_meta_datas(self):
        """Read every raw-data CSV into one flat list of row dicts."""
        meta_datas = list()
        for file in os.listdir(self.doc_path):
            if not file.endswith('.csv'):
                continue
            df = pd.read_csv(self.doc_path + file)
            meta_datas.extend(df.to_dict('records'))
        return meta_datas

    @staticmethod
    def _build_page_content(meta_data):
        """Build the searchable text for one row: user question or strategy."""
        if "用户问题" in meta_data:
            return "用户问题：" + str(meta_data['用户问题']).strip()
        return "沟通策略：" + str(meta_data['沟通策略']).strip()

    def get_knowledge(self):
        """Build an in-memory LangChain FAISS store (alternative pipeline).

        Only this method populates self.local_knowledge_base, which
        search_with_score_tmp depends on.
        """
        meta_datas = self._read_meta_datas()
        docs = [
            Document(page_content=self._build_page_content(meta_data),
                     metadata=meta_data)
            for meta_data in meta_datas
        ]
        self.local_knowledge_base = FAISS.from_documents(docs, self.embeddings)

    def save_knowledge_embedding(self):
        """Embed all rows and persist the FAISS index plus metadata pickle.

        Errors are logged and swallowed (best effort) so startup can continue
        with a previously saved index.
        """
        try:
            meta_datas = self._read_meta_datas()
            page_contents = [self._build_page_content(m) for m in meta_datas]

            # Embed the texts and build a flat L2 index over them
            embeddings = self.embeddings.embed_documents(page_contents)
            embeddings_matrix = np.array(embeddings).astype('float32')
            index = faiss.IndexFlatL2(embeddings_matrix.shape[1])
            index.add(embeddings_matrix)
            # Persist row metadata and index side by side; list order is the
            # implicit join key between the two files.
            with open(self.doc_data_path, 'wb') as f:
                pickle.dump(meta_datas, f)
            faiss.write_index(index, self.doc_index_path)

        except Exception:
            logger.error(traceback.format_exc())

    def load_knowledge_embedding(self):
        """Load the persisted FAISS index and metadata pickle into memory."""
        self.doc_index = faiss.read_index(self.doc_index_path)
        with open(self.doc_data_path, 'rb') as f:
            self.doc_data = pickle.load(f)

    async def search_with_score(self, query_str, top_k=8, limit_score=1.2):
        """Vector-search the knowledge base for query_str.

        Args:
            query_str: query text; falsy input returns [].
            top_k: number of nearest neighbours to retrieve.
            limit_score: maximum L2 distance accepted (lower = closer).

        Returns:
            A list of langchain Documents; [] when nothing qualifies or an
            error occurred (errors are logged, never raised).
        """
        results = []
        try:
            if query_str:
                query_vector = await self.embeddings.async_embed_query(query_str)
                D, I = self.doc_index.search(np.array([query_vector]).astype('float32'), top_k)
                for score, index in zip(D[0], I[0]):
                    doc = self.doc_data[index]
                    if score >= limit_score:
                        continue
                    # Only return rows of the same kind the query asks about
                    if '用户问题' in query_str and '用户问题' not in doc:
                        continue
                    if '沟通策略' in query_str and '沟通策略' not in doc:
                        continue
                    if "用户问题" in doc:
                        page_content = "用户问题：" + doc["用户问题"]
                    else:
                        page_content = "沟通策略：" + doc["沟通策略"]
                    # Normalize priority from float (pandas NaN-capable) to int
                    if not pd.isna(doc["优先级"]):
                        doc["优先级"] = int(doc["优先级"])
                    results.append(Document(
                        page_content=page_content,
                        metadata=doc))
        except Exception:
            logger.error(traceback.format_exc())
        # Fix: always return the list — previously the method fell off the
        # end and returned None whenever an exception was caught.
        return results

    async def search_with_score_tmp(self, query_str, top_k=8, limit_score=1.2):
        """Search via the in-memory LangChain store.

        Requires get_knowledge() to have been called first (it is currently
        commented out in __init__).
        """
        doc_scores = self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k)
        return [doc for doc, score in doc_scores if score < limit_score]



class HealthCate:
    """Maps free-text health/disease descriptions to disease categories.

    Mirrors ProfessCate: a "condition:category" dictionary file is loaded
    and embedded once; queries are matched by embedding similarity.
    """

    def __init__(self):
        self.embedding = LocalEmbeddings()
        self.health2cate = self.get_health_dict(curPath + "/../doc/health_category_dict.txt")
        self.get_health_embedding()

    def get_health_dict(self, file_path):
        """Parse a "condition:category" file into a dict.

        Lines that do not split into exactly two ':'-separated parts are
        skipped.
        """
        health2cate = {}
        # Explicit UTF-8 so the Chinese dictionary file parses regardless of
        # the platform's default locale encoding.
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split(':')
                if len(parts) != 2:
                    continue
                health2cate[parts[0]] = parts[1]
        return health2cate

    def get_health_embedding(self):
        """Pre-compute and cache embeddings for all known conditions."""
        if len(self.health2cate) < 1:
            return ''
        self.healthions = list(self.health2cate.keys())
        health_embeddings = self.embedding.embed_documents(self.healthions)
        self.health_embeddings = np.array(health_embeddings)

    def get_health_cate(self, health_query, score=0.5):
        """Return the disease category of the known condition closest to the query.

        Args:
            health_query: free-text condition name.
            score: minimum similarity (dot product) required for a match.

        Returns:
            '' for non-string or empty input, the matched category on
            success, or '未知疾病类型' when nothing is similar enough.
        """
        if not isinstance(health_query, str) or health_query == '':
            return ''
        key_embedding = np.array(self.embedding.embed_query(health_query))
        scores = key_embedding @ self.health_embeddings.T
        best = int(np.argmax(scores))
        if scores[best] > score:
            # (Removed leftover debug print of the matched condition.)
            return self.health2cate[self.healthions[best]]
        return '未知疾病类型'


def get_nth_age(age, n=1):
    """Extract the n-th integer from an age expression.

    Args:
        age: an int (returned unchanged) or a string such as "25-30岁".
        n: 1-based index of the number to extract; must be a positive int.
           Note: n is validated only after the int fast path, so an int age
           is returned even with an invalid n (preserves original behavior).

    Returns:
        The n-th integer found in the string, the int itself, or '' when
        fewer than n numbers exist or age is neither int nor str.

    Raises:
        ValueError: if n is not a positive integer (and age is not an int).
    """
    if isinstance(age, int):
        return age
    if not isinstance(n, int) or n <= 0:
        raise ValueError("n should be a positive integer")
    if isinstance(age, str):
        # findall alone suffices; the original's extra re.search pre-check
        # and the unreachable trailing return were removed.
        nums = re.findall(r'\d+', age)
        if len(nums) >= n:
            return int(nums[n - 1])
    return ''





if __name__ == "__main__":
    # Ad-hoc smoke tests — uncomment the scenario you want to exercise.

    # Profession categorization:
    # profess = ProfessCate()
    # print(profess.get_cate("赛车手"))
    # print(profess.get_cate("高铁司机"))

    # City -> province mapping:
    # city = City()
    # city.get_city()
    # print(city.city2province)

    # Candidate-product matching (needs BGEQuery embeddings + product config):
    # import pandas as pd
    # from knowledge_base.document_search import get_products_name
    # from knowledge_base.knowledge_embedding import BGEQuery
    # knowledge_base_query = BGEQuery()
    # embeddings = knowledge_base_query.embeddings
    # current_script_path = os.path.abspath(__file__)
    # csv_file_path = os.path.normpath(os.path.join(current_script_path, '../../doc/products_config.csv'))
    # products_config_df = pd.read_csv(csv_file_path, encoding='utf-8')
    # products_config_df.fillna('', inplace=True)
    # products_config_df = products_config_df.set_index('产品名称')
    # products_config = products_config_df.to_dict(orient='index')
    # products_name = get_products_name(os.path.normpath(os.path.join(current_script_path, "../../doc/products_name.txt")))
    # candpro = CandidateProducts(embeddings=embeddings, products_name=products_name, products_config=products_config)
    # print('\n'.join(candpro.get_condidate_products("小蜜蜂意外险")))
    # print(candpro.get_sim_product("重大疾病保险"))

    # Knowledge-base vector search:
    # knowledge_base_query = WechatKnowledge()
    # query_str = "给我打电话干啥啊"
    # answer = asyncio.run(knowledge_base_query.search_with_score(query_str))
    # print(answer)

    # Health categorization (currently active):
    health_cate = HealthCate()
    print(health_cate.get_health_cate("湿疹"))

