import os

from openinference.instrumentation.llama_index import LlamaIndexInstrumentor
from phoenix.otel import register

from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.llms.deepseek import DeepSeek

# ZhipuAI embeddings as the global embedding model. The API key is read from
# the environment (ZHIPUAI_API_KEY is an assumed variable name) instead of
# being hard-coded in source.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ["ZHIPUAI_API_KEY"],
    # With the `embedding-3` family of models you can also request a specific
    # output size, e.g. dimensions=1024.
)
Settings.embed_model = embed_model

# DeepSeek as the chat LLM. As above, the key is read from the environment
# (DEEPSEEK_API_KEY is an assumed variable name) rather than hard-coded.
llm = DeepSeek(model="deepseek-chat", api_key=os.environ["DEEPSEEK_API_KEY"])
Settings.llm = llm
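
# Optional sanity check (a sketch): uncomment to confirm both models respond
# before building the index. `complete` and `get_text_embedding` are the
# standard LlamaIndex LLM / embedding calls.
# print(llm.complete("Say hi in one word."))
# print(len(embed_model.get_text_embedding("hello")))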


# Register an OpenTelemetry tracer provider pointed at a Phoenix collector
# (start a Phoenix instance first, e.g. with `phoenix.launch_app()`), then
# instrument LlamaIndex so queries emit traces.
tracer_provider = register()
LlamaIndexInstrumentor().instrument(tracer_provider=tracer_provider)

# Load documents from ./data, embed them into an in-memory vector index,
# and run a single query against it.
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("Some question about the data should go here")
print(response)
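

# --- Optional: agent wrapper (sketch) ---------------------------------------
# A minimal sketch of wrapping the query engine in a FunctionAgent via a
# QueryEngineTool. The tool name, description, system prompt, and question are
# illustrative placeholders, not part of the setup above.
import asyncio

from llama_index.core.agent.workflow import FunctionAgent
from llama_index.core.tools import QueryEngineTool


async def run_agent() -> None:
    # Expose the vector index's query engine as a tool the agent can call.
    rag_tool = QueryEngineTool.from_defaults(
        query_engine=query_engine,
        name="document_search",
        description="Answers questions about the documents in the ./data folder.",
    )
    agent = FunctionAgent(
        tools=[rag_tool],
        llm=llm,
        system_prompt="Answer questions, using the document_search tool when relevant.",
    )
    # FunctionAgent.run is async, so it has to be awaited.
    result = await agent.run("Some question about the data should go here")
    print(result)


# Uncomment to run the agent in addition to the direct query above.
# asyncio.run(run_agent())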