import os
from typing import List

from pydantic import BaseModel

from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer
from llama_index.core.agent.workflow import FunctionAgent
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document, TextNode
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding

# Embedding model: ZhipuAI "embedding-2".
# SECURITY: the API key must come from the environment, never from source —
# the key previously committed here is compromised and should be rotated.
# os.environ[...] (rather than os.getenv) fails fast with a clear KeyError
# when the variable is missing, instead of sending a None key to the API.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ["ZHIPUAI_API_KEY"],
    # With the `embedding-3` class of models you can also specify the size
    # of the embeddings you want returned, e.g. dimensions=1024.
)
# Register as the process-wide default embedding model for llama-index.
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

# Chat LLM: DeepSeek "deepseek-chat".
# SECURITY: read the key from the environment — the key previously
# hard-coded here is compromised and should be rotated.
llm = DeepSeek(model="deepseek-chat", api_key=os.environ["DEEPSEEK_API_KEY"])
# Register as the process-wide default LLM for llama-index.
Settings.llm = llm
from llama_index.core.extractors.metadata_extractors import (
    KeywordExtractor,
    PydanticProgramExtractor,
    QuestionsAnsweredExtractor,
    SummaryExtractor,
    TitleExtractor,
)
from llama_index.core.extractors.document_context import DocumentContextExtractor


# Sample Chinese news sentence wrapped in a single TextNode — presumably the
# fixture for the metadata extractors imported above (TODO: confirm; the
# extractor usage is not visible in this part of the file).
text="""2022年6月14日，江苏南京大报恩塔与“超级月亮”相映成景。 """
node=TextNode(text=text)

from llama_index.core.prompts import PromptTemplate


from pydantic import BaseModel, Field
# --- Disabled example code (kept for reference) ------------------------------
# The original file "commented out" this section by wrapping it in a pair of
# implicitly concatenated triple-quoted strings — a fragile no-op-expression
# trick (note the mangled `''''''` that used to be Cat's docstring). Plain
# `#` comments are the conventional, safe way to keep dead example code;
# runtime behavior is unchanged (the string literal was already a no-op).
#
# class Cat(BaseModel):
#     name: str = Field(description="名称")
#
# prompt = PromptTemplate("Please predict a Cat with a random name related to {topic}.")
# output = llm.structured_predict(Cat, prompt, topic="cats")
# print(output)
#
# prompt = PromptTemplate("Please predict a Cat with a random name related to {topic}.")
#
# class Cat(BaseModel):
#     name: str = Field(description="名称")
#
# stream_output = llm.stream_structured_predict(Cat, prompt, topic="cats")
# for partial_output in stream_output:
#     # stream partial outputs until completion
#     print(partial_output)
#     print(partial_output.name)
#
# from llama_index.core.prompts import PromptTemplate
#
# prompt = PromptTemplate("Please write a random name related to {topic}.")
# output = llm.predict(prompt, topic="国家")
# print(output)

from llama_index.core.multi_modal_llms.base import (
    MultiModalLLM,
    MultiModalLLMMetadata,
)

# Bare name expression: a no-op at runtime. NOTE(review): presumably left as a
# quick check that MultiModalLLM resolves after the import — confirm intent or
# remove.
MultiModalLLM