import os

import yaml
from langchain_huggingface import HuggingFaceEmbeddings
import pandas as pd
from loguru import logger
from langchain_chroma import Chroma
from pptx import Presentation
from tqdm import tqdm
from langchain_openai import OpenAIEmbeddings


class ChromaDataloadEngineSearch:
    """Index table data extracted from PPTX decks into a Chroma vector store
    and serve similarity search over historical award-winning projects.
    """

    def __init__(self, config_yaml_path="./config_for_search_api.yaml"):
        """Load the YAML config, build the local embedding model and open the
        persistent Chroma store.

        Args:
            config_yaml_path: Path to the YAML configuration file. Expected keys
                (per the reads below): Embedding.GPU_device, Embedding.model_path,
                chroma_dir.independent_project.
        """
        with open(config_yaml_path, 'r', encoding='utf-8') as file:
            self.yaml_ = yaml.safe_load(file)
        # Local HuggingFace embedding model; device and weights path come from config.
        model_kwargs = {'device': self.yaml_["Embedding"]["GPU_device"], 'trust_remote_code': True}
        encode_kwargs = {'normalize_embeddings': False}
        self.embeddings = HuggingFaceEmbeddings(model_name=self.yaml_["Embedding"]["model_path"],
                                                model_kwargs=model_kwargs,
                                                encode_kwargs=encode_kwargs)

        self.chroma_dir_path = self.yaml_["chroma_dir"]["independent_project"]
        self.history_project_vectorstore = Chroma(embedding_function=self.embeddings,
                                                  persist_directory=self.chroma_dir_path)

    def init_chroma(self, all_list, batch_size=5):
        """Populate the vector store with `all_list` texts, only if it is empty.

        Args:
            all_list: List of text strings to embed and add.
            batch_size: Texts embedded per add_texts call (tune to GPU memory).
        """
        # NOTE(review): uses the private _collection attribute; Chroma exposes no
        # stable public count — confirm against the installed langchain-chroma version.
        data_le = self.history_project_vectorstore._collection.count()
        if data_le < 1:
            logger.info("历史获奖项目数据向量初始化")
            total_length = len(all_list)
            # Ceil division so the progress bar counts the final partial batch too
            # (the original floor division under-reported the total).
            total_batches = (total_length + batch_size - 1) // batch_size
            for i in tqdm(range(0, total_length, batch_size), total=total_batches):
                batch = all_list[i:i + batch_size]
                self.history_project_vectorstore.add_texts(batch)
            data_le = self.history_project_vectorstore._collection.count()
        logger.info(f"历史获奖项目项目数据向量  加载了{data_le}条")

    def extract_tables_from_slide(self, slide):
        """Extract every table on a slide as a list of rows of stripped cell text.

        Returns:
            list[list[list[str]]]: one entry per table; each table is a list of
            rows; each row is a list of cell strings.
        """
        return [
            [[cell.text.strip() for cell in row.cells] for row in shape.table.rows]
            for shape in slide.shapes
            if shape.has_table
        ]

    def find_slides_with_tables(self, pptx_path, target_text):
        """Find slides whose text contains `target_text` and that carry tables.

        Args:
            pptx_path: Path to the .pptx file.
            target_text: Substring to look for in any shape's text.

        Returns:
            list[dict]: one dict per matching slide with keys "页码" (1-based
            page number) and "表格数据" (the extracted tables).
        """
        prs = Presentation(pptx_path)
        found_slides = []
        for page_no, slide in enumerate(prs.slides, start=1):
            # A slide matches when any shape's text contains the target string.
            has_target = any(
                hasattr(shape, "text") and target_text in shape.text
                for shape in slide.shapes
            )
            if has_target:
                tables = self.extract_tables_from_slide(slide)
                if tables:
                    found_slides.append({
                        "页码": page_no,
                        "表格数据": tables
                    })
        return found_slides

    def transpose(self, matrix):
        """Transpose a 2D list (swap rows and columns).

        Returns [] when the input is empty or not a list of lists.
        NOTE: zip() truncates ragged input to the shortest row length.
        """
        if not matrix or not all(isinstance(row, list) for row in matrix):
            return []
        return [list(row) for row in zip(*matrix)]

    def ppt_loader(self, ppt_path, target_text="投资估算"):
        """Flatten the tables of slides containing `target_text` into strings.

        Each matching table is transposed, its last transposed row dropped
        (i.e. the last column of the original table), and the remaining cells
        joined into one "<<file name>>:cell,cell..." string.

        Args:
            ppt_path: Path to the .pptx file.
            target_text: Slide-selection substring (default: "投资估算").

        Returns:
            (bool, list[str]): (True, strings) when matches exist, else (False, []).
        """
        ppt_name = os.path.basename(ppt_path)
        result = self.find_slides_with_tables(ppt_path, target_text=target_text)
        if not result:
            return False, []

        out_tables_list = []
        for slide in result:
            for table in slide["表格数据"]:
                transposed = self.transpose(table)
                # Join rows without a separator between them, matching the
                # original accumulation; join beats += concatenation in a loop.
                body = "".join(",".join(row) for row in transposed[:-1])
                out_tables_list.append("<<" + ppt_name + ">>:" + body)
        return True, out_tables_list

    def load_dir_pptxs(self, dir_path):
        """Recursively collect flattened table strings from every .pptx under dir_path."""
        all_list = []
        for root, _dirs, files in os.walk(dir_path):
            for file_name in files:
                if file_name.lower().endswith(".pptx"):
                    found, table_strings = self.ppt_loader(os.path.join(root, file_name))
                    if found:
                        all_list.extend(table_strings)
        return all_list

    def distance_to_percent(self, distance, min_distance=0.0, max_distance=2.0):
        """Map a distance score onto a similarity percentage.

        Linearly rescales `distance` from [min_distance, max_distance] to
        [100, 0] and clamps the result to the 0–100 range.
        """
        similarity = 1 - (distance - min_distance) / (max_distance - min_distance)
        return max(0.0, min(100.0, similarity * 100))

    def history_similarity_search(self, query, ppt_dir="./ppt_datas/", k=5):
        """Ensure the store is populated from `ppt_dir`, then search it.

        Args:
            query: Free-text query string.
            ppt_dir: Directory of .pptx sources used to (lazily) seed the store.
            k: Number of nearest neighbours to return.

        Returns:
            list[[str, float]]: [page_content, similarity_percent] pairs.
        """
        self.init_chroma(self.load_dir_pptxs(ppt_dir))
        # similarity_search_with_score returns (Document, distance) pairs.
        result = self.history_project_vectorstore.similarity_search_with_score(query, k=k)
        return [
            [doc.page_content, self.distance_to_percent(score)]
            for doc, score in result
        ]


if __name__ == '__main__':
    # Build the engine (reads config, loads the embedding model, opens the
    # Chroma store) and run one sample similarity search.
    # Removed: an exit(0) followed by unreachable code that referenced an
    # undefined name `query`, plus commented-out legacy xlsx loading calls.
    engine = ChromaDataloadEngineSearch()
    print(engine.history_similarity_search("ppt_datas/快速立项-山东移动2024年网渠集中运营大屏项目方案汇报.pptx"))
