from typing import Union,Optional,Iterator,TypedDict,Any
import os,shutil,asyncio

from langchain_community.document_loaders import DirectoryLoader,TextLoader
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter 
from langchain.chat_models import init_chat_model
from openai import RateLimitError

from model_config import embedding_model_define,get_embedding_model_default
from vector_store_config import get_vector_storage_default

# Embedding model: project-provided default (see model_config).
embedding=get_embedding_model_default()
# Vector store backed by the embedding model above (see vector_store_config).
vector_store=get_vector_storage_default(embedding=embedding)

# Load data: every .txt file under `data_folder`. After a file is stored
# successfully it is moved to `loaded_data_folder`, so reruns skip it.
# NOTE(review): `data_folder` uses Windows separators — this script presumably
# targets Windows; confirm before running elsewhere.
data_folder:str='.\\data\\hy'
# Sibling folder "<basename>_Loaded" next to data_folder. Built from
# dirname/basename instead of joining a hard-coded '..\\' component, which
# only resolves correctly when os.sep is a backslash.
loaded_data_folder:str=os.path.join(
  os.path.dirname(os.path.abspath(data_folder)),
  f'{os.path.basename(data_folder)}_Loaded',
)
# exist_ok avoids the racy exists()-then-makedirs() check.
os.makedirs(loaded_data_folder,exist_ok=True)
loader:DirectoryLoader=DirectoryLoader(
  data_folder,
  glob='**/*.txt',
  loader_cls=TextLoader,
  # sample_size=2,
  show_progress=True,
  # autodetect_encoding lets TextLoader read non-UTF-8 files.
  loader_kwargs={'autodetect_encoding':True},
)
# Lazy iterator: files are read one at a time as store_documents consumes them.
docs:Iterator[Document]=loader.lazy_load()

# Split data: recursive character splitter sized from the embedding model's
# configuration, with a 20% overlap between consecutive chunks.
text_splitter:RecursiveCharacterTextSplitter=RecursiveCharacterTextSplitter(
  chunk_size=embedding_model_define.split_chunk_size,
  # int(): chunk_overlap must be an integer; `size * 0.2` yields a float.
  chunk_overlap=int(embedding_model_define.split_chunk_size*0.2),
)

# 保存到向量存储库
async def store_documents(docs:Iterator[Document]=iter([])):
  for doc in docs:
    try:
      source:str=doc.metadata['source']
      # splited_docs:list[Document]=text_splitter.split_documents(iter([doc]))
      splited_docs=[doc]
      ids:list[str]=[]
      for index,_ in enumerate(splited_docs):
        id:str=f'{os.path.basename(source)}_{index}'
        ids.append(id)
      success:bool=False
      while not success:
        try:
          vector_store.add_documents(
            documents=splited_docs,
            ids=ids,
            batch_size=embedding_model_define.batch_size,
            
          )
          success=True
        except RateLimitError:
          print('模型服务访问频率限制，等待60秒后继续...')
          await asyncio.sleep(60)
          print('继续尝试add_documents')
      shutil.move(source,loaded_data_folder)
    except Exception as e:
      print(type(e))
      print(e)
      raise e

# Module-level side effect: ingest all lazily-loaded documents on import/run.
asyncio.run(store_documents(docs))

# Querying the vector store (example calls, currently disabled):
# search_result_docs=vector_store.similarity_search(query)
# retriever_result_docs=vector_store.as_retriever().invoke(query)
# print(retriever_result_docs)


