from llama_index.llms.openai import OpenAI 
from llama_index.core.chat_engine import SimpleChatEngine

# def deepseek_llm(**kwargs):
#     llm = OpenAI(api_key='<REDACTED>',  # SECURITY: key was committed here; revoke it and load from an env var instead
#                  model="deepseek-chat",
#                  api_base="https://api.deepseek.com/v1",
#                  temperature=0.7,
#                  **kwargs)
#     return llm
# from typing import Dict
from llama_index.core import SimpleDirectoryReader,VectorStoreIndex,Settings,get_response_synthesizer,StorageContext,load_index_from_storage,Document
from ollama import chat

# from llama_index.llms.openai.utils import ALL_AVAILABLE_MODELS, CHAT_MODELS
# DEEPSEEK_MODELS: Dict[str, int] = {
#     "deepseek-chat": 128000,
# }
# ALL_AVAILABLE_MODELS.update(DEEPSEEK_MODELS)
# CHAT_MODELS.update(DEEPSEEK_MODELS)

# def moonshot_llm(**kwargs):
#     llm = OpenAI(api_key='<REDACTED>',  # SECURITY: key was committed here; revoke it and load from an env var instead
#                  base_url="https://xiaoai.plus/v1",
#                  **kwargs)
#     return llm

# def deepseek_llm(**kwargs):
#     llm = OpenAI(api_key='<REDACTED>',  # SECURITY: key was committed here; revoke it and load from an env var instead
#                  model="gpt-3.5-turbo",
#                  api_base="https://xiaoai.plus/v1",
#                  temperature=0.7,
#                  **kwargs)
#     return llm

from llms import deepseek_llm

# Configure the project's DeepSeek-backed LLM as the default for every
# llama-index component (kept at module level so importing this module
# still applies the global Settings, as before).
Settings.llm = deepseek_llm()


def main() -> None:
    """Launch an interactive streaming chat REPL backed by the configured LLM.

    Blocks reading user input from stdin until the user exits the REPL.
    Returns nothing; all interaction happens via stdin/stdout.
    """
    # from_defaults() picks up Settings.llm set above.
    chat_engine = SimpleChatEngine.from_defaults()
    chat_engine.streaming_chat_repl()


# Guard the blocking REPL so importing this module no longer hangs;
# previously the REPL started as an import-time side effect.
if __name__ == "__main__":
    main()