from typing import List

from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_zhipu import ZhipuAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import Docx2txtLoader
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.text import TextLoader
# from PIL import Image
from langchain_community.document_loaders.image import UnstructuredImageLoader
import os
from functools import lru_cache
from customer_logging import get_logger
#
# import cv2
# import pytesseract

import faiss
from pathlib import Path
import uuid

# 设置tesseract-ocr安装路径
# pytesseract.pytesseract.tesseract_cmd = r'C:\Users\Yang.Shen9\AppData\Local\Programs\Tesseract-OCR\tesseract.exe'  # 根据实际路径修改

# Directory name under which the FAISS index is persisted via save_local()
# and reloaded via load_local().
index_name = "my_vectors.faiss"

# Path object used to test whether a persisted index already exists on disk.
index_path = Path(index_name)

# Module-level logger for document-ingestion / vector-store messages.
logger = get_logger("doc-")


class FAISSService:
    """Thin service wrapper around a FAISS vector store persisted at ``index_name``.

    Documents are split into 200-character chunks (no overlap) and embedded
    with ZhipuAI embeddings. On construction the service either reloads an
    existing on-disk index or builds a fresh one from the files in ``One/``.
    """

    # Shared vector store; populated in __init__.
    vectorstore = None
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0)
    # SECURITY: the API key used to be hard-coded here. Prefer the
    # ZHIPUAI_API_KEY environment variable; the old literal is kept only as a
    # backward-compatible fallback — rotate that key and delete the fallback.
    embeddings = ZhipuAIEmbeddings(
        api_key=os.environ.get(
            "ZHIPUAI_API_KEY",
            "9b1f2582ae0c44c036ba1fdabed75c7b.FsfpGGSXKU4EZdsf",
        )
    )

    # Number of vector-store caches kept by load_vector_store().
    CACHED_VS_NUM = 1

    def __init__(self):
        """Load the persisted index if present; otherwise build and persist it.

        The fresh build reads every file under the ``One/`` directory.
        """
        if index_path.exists():
            self.vectorstore = self.load_vector_store()
            # The original also re-saved the index it had just loaded —
            # a full, pointless disk rewrite — so that call was removed.
            return
        # Split the source documents into chunks, then embed and persist them.
        all_splits = self.text_splitter.split_documents(self.load_os_file("One"))
        self.vectorstore = FAISS.from_documents(all_splits, embedding=self.embeddings)
        self.vectorstore.save_local(index_name)

    @staticmethod
    def load_os_file(file_path) -> list:
        """Load every regular file in directory *file_path* into documents.

        Dispatch by extension: ``.pdf`` -> PyPDFLoader, ``.docx``/``.doc`` ->
        Docx2txtLoader, anything else -> TextLoader (assumed UTF-8 text).

        Returns a flat list of LangChain Document objects.
        """
        documents = []
        for file_name in os.listdir(file_path):
            full_file_path = os.path.join(file_path, file_name)
            # Skip sub-directories and other non-regular entries.
            if not os.path.isfile(full_file_path):
                continue
            if file_name.endswith('.pdf'):
                loader = PyPDFLoader(full_file_path)
            elif file_name.endswith(('.docx', '.doc')):
                # NOTE(review): Docx2txtLoader only understands .docx; legacy
                # binary .doc files will likely fail — confirm before relying
                # on this branch for .doc input.
                loader = Docx2txtLoader(full_file_path)
            else:
                # Assume any other extension is loadable as UTF-8 text.
                loader = TextLoader(full_file_path, encoding='utf-8')
            documents.extend(loader.load())
        return documents

    def load_file(self, file_path):
        """Load one file, split it into chunks and merge it into the index.

        Extension dispatch mirrors :meth:`load_os_file`. Persists the updated
        index to disk via :meth:`write_doc`. Returns None.
        """
        if file_path.endswith('.pdf'):
            loader = PyPDFLoader(file_path)
        elif file_path.endswith(('.docx', '.doc')):
            # NOTE(review): Docx2txtLoader only handles .docx — see load_os_file.
            loader = Docx2txtLoader(file_path)
        else:
            # Fallback: treat the file as UTF-8 text.
            logger.info("开始写入文档")
            loader = TextLoader(file_path, encoding='utf-8')
        document = loader.load()
        logger.info("文档读取完成")
        all_splits = self.text_splitter.split_documents(document)
        logger.info("文档拆分完成")
        self.write_doc(all_splits)

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every instance alive for the cache's lifetime (ruff B019). Kept as-is
    # because this service is effectively a singleton and callers may rely on
    # the cached load; revisit if multiple instances are ever created.
    @lru_cache(CACHED_VS_NUM)
    def load_vector_store(self):
        """Deserialize the FAISS index from disk.

        allow_dangerous_deserialization is acceptable only because the index
        directory is produced locally by this same service (trusted input).
        """
        return FAISS.load_local(index_name, self.embeddings, allow_dangerous_deserialization=True)

    def similarity_search_with_score(self, question, metadata: dict):
        """Search the store for documents similar to *question*.

        NOTE(review): despite the name, this delegates to similarity_search()
        and returns documents WITHOUT scores. The name is kept so existing
        callers keep working; use vectorstore.similarity_search_with_score()
        directly if scores are actually needed.
        """
        return self.vectorstore.similarity_search(question, filter=metadata)

    def all_data(self):
        """Return the raw {id: Document} mapping of the in-memory docstore.

        Reaches into the private ``_dict`` attribute — fragile across
        LangChain versions.
        """
        return self.vectorstore.docstore._dict

    def del_data(self, ids: List[str]):
        """Delete the documents with the given ids from the vector store.

        NOTE(review): the deletion is in-memory only; the index is not
        re-saved here — confirm callers persist afterwards if needed.
        """
        return self.vectorstore.delete(ids=ids)

    def write_templates(self, template):
        """Embed a single short text snippet and merge it into the index.

        Persists the updated index and returns the generated hex id so the
        caller can later delete the snippet via del_data().
        """
        logger.info("短知识开始训练")
        logger.info("知识留存" + template)
        uid_hex = uuid.uuid4().hex
        new_vector_store = FAISS.from_texts([template], self.embeddings, ids=[uid_hex])
        logger.info(new_vector_store.docstore._dict)
        self.vectorstore.merge_from(new_vector_store)
        self.vectorstore.save_local(index_name)
        logger.info("短知识训练完成")
        return uid_hex

    def write_doc(self, documents):
        """Embed pre-split documents, merge them into the index and persist."""
        logger.info("文档开始写入")
        vectorstore_new = FAISS.from_documents(documents, self.embeddings)
        logger.info("文档开始合并")
        self.vectorstore.merge_from(vectorstore_new)
        logger.info("文档写入本地索引")
        self.vectorstore.save_local(index_name)
        logger.info("文档上传完成")

    # def get_template(inputs):
    #     vector_store = load_vector_store()
    #     related_docs_with_score = vector_store.similarity_search_with_score(inputs, k=VECTOR_SEARCH_TOP_K)
    #     # print(related_docs_with_score)
    #     tempalte = related_docs_with_score[0][0].page_content
