 
from llama_index.core import Settings
 
  
from llama_index.llms.deepseek  import DeepSeek
 


    # 连接Chroma数据库


llm = DeepSeek(model="deepseek-chat", api_key="sk-605e60a1301040759a821b6b677556fb")
Settings.llm = llm
 
import os

from zhipuai import ZhipuAI  # noqa: F401 — unused here; kept in case other parts of the project rely on it
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding

# SECURITY: the ZhipuAI API key used to be hard-coded here (a leaked
# credential — it should be rotated). Prefer the ZHIPUAI_API_KEY
# environment variable; the literal fallback keeps the script runnable.
embeddings = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ.get("ZHIPUAI_API_KEY", "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5"),
    # With the `embedding-3` class of models you can also request a
    # specific embedding size, e.g. dimensions=1024.
)
# Route every LlamaIndex embedding call through ZhipuAI by default.
Settings.embed_model = embeddings

from llama_index.core import StorageContext, load_index_from_storage
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex

# Build an in-memory vector index over the local essay corpus.
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents)

# "condense_question" rewrites each follow-up into a standalone query
# before retrieval; streaming=True lets us print tokens as they arrive.
chat_engine = index.as_chat_engine(
    chat_mode="condense_question", streaming=True
)
print("OK")

# Ask the two demo questions in turn, streaming each answer to stdout.
for question in (
    "What did Paul Graham do after YC?",
    "What about after that?",
):
    response_stream = chat_engine.stream_chat(question)
    response_stream.print_response_stream()
 


