import os
import uuid
from functools import lru_cache
from pathlib import Path
from typing import List

from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import ZhipuAIEmbeddings


INDEX_NAME = "my_vectors.faiss"


class FAISSService:
	"""Manage a FAISS vector store backed by ZhipuAI embeddings.

	On construction, either reloads an existing index from disk or builds a
	fresh one from the files in the local ``One/`` directory.  Provides
	helpers to add documents/templates, delete entries, and persist the
	index back to ``self.index_name``.
	"""

	# Shared, stateless class-level collaborators; `vectorstore` is
	# re-assigned per instance in __init__.
	vectorstore = None
	text_splitter = RecursiveCharacterTextSplitter(
		chunk_size=500,
		chunk_overlap=50,
		keep_separator=True
	)
	# SECURITY NOTE(review): this API key was previously hard-coded (and is
	# therefore in version-control history) — rotate it.  The environment
	# variable takes precedence; the literal remains only as a
	# backward-compatible fallback.
	embeddings = ZhipuAIEmbeddings(
		api_key=os.environ.get(
			"ZHIPUAI_API_KEY",
			"94212a6b8ec941ce944026c45e518c75.NsD7wvYp15f2cDLL",
		)
	)
	# Kept for backward compatibility with external readers; no longer used.
	# (It previously sized an `@lru_cache` on an instance method, which keyed
	# on `self` and kept instances alive — ruff B019.)
	CACHED_VS_NUM = 1

	def __init__(self, index_name: str):
		"""Load the index at `index_name`, or build it from ./One on first run.

		Args:
			index_name: on-disk path of the FAISS index to load/persist.
		"""
		self.index_name = index_name
		self._vs_cache = None  # per-instance memo for load_vector_store()
		self.documents: List = []
		if Path(index_name).exists():
			self.vectorstore = self.load_vector_store()
			# NOTE(review): reaches into the docstore's private `_dict`;
			# there is no public "all documents" accessor in this API.
			self.documents = list(self.vectorstore.docstore._dict.values())
			return
		# First run: index every file found under the local One/ directory.
		raw_docs = self.load_os_file("One")
		all_splits = self.text_splitter.split_documents(raw_docs)
		self.documents = all_splits
		self.vectorstore = FAISS.from_documents(all_splits, embedding=self.embeddings)

	def get_embeddings(self):
		"""Return the shared embeddings client."""
		return self.embeddings

	@staticmethod
	def load_os_file(file_path) -> list:
		"""Load every regular file directly inside `file_path` as documents.

		Dispatch by extension: .pdf -> PyPDFLoader, .docx/.doc ->
		Docx2txtLoader, anything else -> TextLoader (UTF-8).  Returns an
		empty list when `file_path` is not a directory; sub-directories are
		skipped (no recursion).
		NOTE(review): Docx2txtLoader targets .docx; legacy binary .doc files
		will likely fail to parse — confirm whether .doc support is needed.
		"""
		documents = []
		if not os.path.isdir(file_path):
			return documents
		for file_name in os.listdir(file_path):
			full_file_path = os.path.join(file_path, file_name)
			if not os.path.isfile(full_file_path):
				continue  # skip sub-directories
			if file_name.endswith('.pdf'):
				loader = PyPDFLoader(full_file_path)
			elif file_name.endswith(('.docx', '.doc')):
				loader = Docx2txtLoader(full_file_path)
			else:
				loader = TextLoader(full_file_path, encoding='utf-8')
			documents.extend(loader.load())
		return documents

	def load_file(self, file_path: str):
		"""Load one file, split it, and merge the chunks into the store.

		Persists via write_doc(); see that method for the save path.
		"""
		if file_path.endswith('.pdf'):
			loader = PyPDFLoader(file_path)
		elif file_path.endswith(('.docx', '.doc')):
			loader = Docx2txtLoader(file_path)
		else:
			loader = TextLoader(file_path, encoding='utf-8')
		all_splits = self.text_splitter.split_documents(loader.load())
		self.write_doc(all_splits)

	def load_vector_store(self):
		"""Deserialize the FAISS index at `self.index_name`, memoized per instance.

		Replaces the former `@lru_cache(CACHED_VS_NUM)` decorator, which keyed
		the cache on `self` and therefore leaked instances (ruff B019), while
		preserving the call-twice-load-once behaviour.
		"""
		if getattr(self, "_vs_cache", None) is None:
			# allow_dangerous_deserialization: FAISS indexes are pickled on
			# disk — only load index files this application itself produced.
			self._vs_cache = FAISS.load_local(
				self.index_name, self.embeddings, allow_dangerous_deserialization=True
			)
		return self._vs_cache

	def all_data(self):
		"""Return the underlying docstore object."""
		return self.vectorstore.docstore

	def del_data(self, ids: List[str]):
		"""Delete the given document ids and persist the store.

		Bug fix: previously saved to "agent_" + INDEX_NAME, so deletions never
		reached the index that __init__ reloads; now saves to self.index_name.
		"""
		self.vectorstore.delete(ids=ids)
		self.vectorstore.save_local(self.index_name)

	def write_templates(self, template, this_index_name=None):
		"""Embed one text, merge it into the store, persist, and return its id.

		`this_index_name` now defaults to this instance's index path
		(previously the module constant INDEX_NAME, which diverged from the
		path that __init__ reloads).
		"""
		this_index_name = this_index_name or self.index_name
		uid_hex = uuid.uuid4().hex
		new_vector_store = FAISS.from_texts([template], self.embeddings, ids=[uid_hex])
		self.vectorstore.merge_from(new_vector_store)
		self.vectorstore.save_local(this_index_name)
		return uid_hex

	def write_doc(self, documents):
		"""Embed pre-split documents, merge them in, and persist the store.

		Bug fix: previously saved to the module constant INDEX_NAME regardless
		of which index this instance was constructed with; now saves to
		self.index_name so __init__ can reload what was written.
		"""
		vectorstore_new = FAISS.from_documents(documents, self.embeddings)
		self.vectorstore.merge_from(vectorstore_new)
		self.vectorstore.save_local(self.index_name)


