import asyncio
import os
from datetime import datetime

from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import SimpleDirectoryReader, Document
from abc import abstractmethod
from llama_index.core import VectorStoreIndex, load_index_from_storage, SummaryIndex
from llama_index.core.indices.base import BaseIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.response_synthesizers import ResponseMode
from llama_index.core.storage.storage_context import DEFAULT_PERSIST_DIR, StorageContext


from llama_index.vector_stores.milvus import MilvusVectorStore
from pymilvus import connections, Collection, CollectionSchema, FieldSchema, DataType


from config import RagConfig

from llama_index.core import Settings
from embeddings import embed_model_local_bge_small
from llms import deepseek_llm
# Configure llama_index process-wide defaults at import time: all subsequent
# indexing/querying in this script uses the local BGE-small embedding model
# and the DeepSeek LLM (both constructed by project-local factory functions).
Settings.embed_model = embed_model_local_bge_small()
Settings.llm = deepseek_llm()


# Source document for this playground run; hard-coded path by design.
file = '/home/tom/my_learn/my_danwen/0_playgrounds/0_test_data/changchun.txt'

# SimpleDirectoryReader may return several Document objects for one file, so
# merge all of their text into a single Document — this makes the sentence
# splitter chunk the file as one continuous unit.
# (Fix: `data[0:]` was a redundant full-copy slice — iterate `data` directly.)
data = SimpleDirectoryReader(input_files=[file]).load_data()
doc = Document(text="\n\n".join(d.text for d in data), metadata={"path": file})
docs = [doc]

# Split the merged document into sentence-based chunks (nodes) using the
# splitter's default chunk size / overlap.
# (Fix: removed a leftover debug `print('')`.)
node_parser = SentenceSplitter.from_defaults()
nodes = node_parser.get_nodes_from_documents(docs)

# NOTE(review): commented-out reference snippet — creates a Milvus-backed
# vector store index from the parsed nodes (overwriting collection 'bbbb').
# Kept for reference; delete once no longer needed.
# vector_store = MilvusVectorStore(
#     uri=RagConfig.milvus_uri,
#     collection_name='bbbb', dim=RagConfig.embedding_model_dim, overwrite=True
# )
# storage_context = StorageContext.from_defaults(vector_store=vector_store)
# index = VectorStoreIndex(nodes, storage_context=storage_context)

# VectorStoreIndex.from_vector_store(vector_store=vector_store)