from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core import  GPTVectorStoreIndex,VectorStoreIndex
from llama_index.llms import openai_like
from llama_index.core import Settings
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.huggingface import HuggingFaceEmbedding  # HuggingFaceEmbedding:用于将文本转换为词向量
from llama_index.llms.huggingface import HuggingFaceLLM  # HuggingFaceLLM：用于运行Hugging Face的预训练语言模型
from llama_index.core import Settings,SimpleDirectoryReader,VectorStoreIndex
import chromadb
from llama_index.embeddings.dashscope import DashScopeEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.llms.deepseek  import DeepSeek
from llama_index.embeddings.fastembed import FastEmbedEmbedding
# Chroma database connection (see the commented-out experimentation block below)


# --- Global LlamaIndex configuration -------------------------------------
# SECURITY: the API key must come from the environment, never from source.
# A key was previously hard-coded here and committed — it is exposed and
# should be rotated/revoked at the provider immediately.
import os

# LLM used for query synthesis (DeepSeek chat model).
llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.getenv("DEEPSEEK_API_KEY", ""),
)
Settings.llm = llm

# Local embedding model (runs via fastembed, no API key required).
# bge-small-en-v1.5 is a compact English sentence-embedding model.
embed_model = FastEmbedEmbedding(model_name="BAAI/bge-small-en-v1.5")
Settings.embed_model = embed_model

 



# NOTE(review): the triple-quoted string below is dead, commented-out
# experimentation code (Chroma vector store, HuggingFace/DashScope
# embeddings, index build and query). It is never executed and references
# names that are not defined in the active script (e.g. `response` on the
# print line, and `vector_store` passed to get_nodes_from_documents), so it
# cannot be un-commented as-is. Kept verbatim; consider moving it to
# version-control history and deleting it from the file.
'''
chroma_client = chromadb.PersistentClient(path="./chroma_db")
chroma_collection=chroma_client.get_collection("docs")
vector_store=ChromaVectorStore(chroma_collection=chroma_collection)
#Settings.llm = Ollama(model="mistral", request_timeout=60.0)

embed_model = HuggingFaceEmbedding(
    # 指定了一个预训练的sentence-transformer模型的路径
    model_name=r"paraphrase-multilingual-MiniLM-L12-v2"
)

# api_key="sk-605e60a1301040759a821b6b677556fb", base_url="https://api.deepseek.com"

 

# Call the complete method with a query

print(response)

Settings.embed_model = DashScopeEmbedding(
    model_name="text-embedding-v2"
)

Settings.embed_model = embed_model


documents = SimpleDirectoryReader('data').load_data()

parser = SimpleNodeParser()

nodes = parser.get_nodes_from_documents(documents,vector_store=vector_store)

index = VectorStoreIndex(nodes)

#
#storage_context = StorageContext.from_defaults(persist_dir="./storage")
# load index
#index = load_index_from_storage(storage_context)


query_engine = index.as_query_engine()

output=query_engine.query("登录名")


print(output)



nodes = parser.get_nodes_from_documents(documents)

print(len(nodes))


documents = SimpleDirectoryReader('data').load_data()

print(len( documents))

print(documents)
'''