from typing import List

from llama_index.core.agent.workflow import  FunctionAgent
from llama_index.core.indices.common.struct_store.sql import SQLStructDatapointExtractor
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.schema import  TextNode
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer, \
    DocumentSummaryIndex, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document
from pydantic import BaseModel
from llama_index.core.indices.property_graph.base import PropertyGraphIndex
from llama_index.core.indices.property_graph.retriever import PGRetriever
from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever
from llama_index.core.indices.property_graph.sub_retrievers.custom import (
    CustomPGRetriever,
    CUSTOM_RETRIEVE_TYPE,
)
from llama_index.core.indices.property_graph.sub_retrievers.cypher_template import (
    CypherTemplateRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.llm_synonym import (
    LLMSynonymRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.text_to_cypher import (
    TextToCypherRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.vector import (
    VectorContextRetriever,
)
from llama_index.core.indices.property_graph.transformations.implicit import (
    ImplicitPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.schema_llm import (
    SchemaLLMPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.simple_llm import (
    SimpleLLMPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.dynamic_llm import (
    DynamicLLMPathExtractor,
)
from llama_index.core.indices.property_graph.utils import default_parse_triplets_fn

import os

# Configure the global embedding model and LLM used by llama-index.
#
# SECURITY: API keys were previously hard-coded here. They are still present
# as fallbacks for backward compatibility, but they have been exposed in
# source control and should be rotated; prefer setting the
# ZHIPUAI_API_KEY / DEEPSEEK_API_KEY environment variables instead.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ.get(
        "ZHIPUAI_API_KEY",
        "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5",
    ),
    # With the `embedding-3` class of models, you can specify the size
    # of the embeddings you want returned, e.g.:
    # dimensions=1024
)
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.environ.get(
        "DEEPSEEK_API_KEY",
        "sk-605e60a1301040759a821b6b677556fb",
    ),
)
Settings.llm = llm


# SQL
from llama_index.core.indices.struct_store.sql_query import (
    NLSQLTableQueryEngine,
    PGVectorSQLQueryEngine,
    SQLTableRetrieverQueryEngine,
)
from llama_index.core.query_engine.citation_query_engine import CitationQueryEngine
from llama_index.core.query_engine.cogniswitch_query_engine import (
    CogniswitchQueryEngine,
)
from llama_index.core.query_engine.custom import CustomQueryEngine
from llama_index.core.query_engine.flare.base import FLAREInstructQueryEngine
from llama_index.core.query_engine.graph_query_engine import (
    ComposableGraphQueryEngine,
)
from llama_index.core.query_engine.jsonalyze import (
    JSONalyzeQueryEngine,
)
from llama_index.core.query_engine.knowledge_graph_query_engine import (
    KnowledgeGraphQueryEngine,
)
from llama_index.core.query_engine.multi_modal import SimpleMultiModalQueryEngine
from llama_index.core.query_engine.multistep_query_engine import (
    MultiStepQueryEngine,
)
from llama_index.core.query_engine.pandas.pandas_query_engine import (
    PandasQueryEngine,
)
from llama_index.core.query_engine.retriever_query_engine import (
    RetrieverQueryEngine,
)
from llama_index.core.query_engine.retry_query_engine import (
    RetryGuidelineQueryEngine,
    RetryQueryEngine,
)
from llama_index.core.query_engine.retry_source_query_engine import (
    RetrySourceQueryEngine,
)
from llama_index.core.query_engine.router_query_engine import (
    RetrieverRouterQueryEngine,
    RouterQueryEngine,
    ToolRetrieverRouterQueryEngine,
)
from llama_index.core.query_engine.sql_join_query_engine import SQLJoinQueryEngine
from llama_index.core.query_engine.sql_vector_query_engine import (
    SQLAutoVectorQueryEngine,
)
from llama_index.core.query_engine.sub_question_query_engine import (
    SubQuestionAnswerPair,
    SubQuestionQueryEngine,
)
from llama_index.core.query_engine.transform_query_engine import (
    TransformQueryEngine,
)

# 1. Load documents and set up a 512-token chunking parser.
documents = SimpleDirectoryReader("./data").load_data()
node_parser = SimpleNodeParser.from_defaults(chunk_size=512)

print(documents)

# 2. Build the vector index.
# BUG FIX: `node_parser=` is not a recognized keyword of `from_documents`
# in recent llama-index releases and was silently ignored, so the 512-token
# chunk size never took effect. Passing the parser through `transformations`
# is the supported way to control chunking. (TODO: confirm against the
# pinned llama-index version.)
index = VectorStoreIndex.from_documents(
    documents,
    transformations=[node_parser],
)

# 3. Plain retriever-based query over the top-2 most similar chunks.
retriever = index.as_retriever(similarity_top_k=2)
query_engine = RetrieverQueryEngine(retriever)
response = query_engine.query("解释RAG架构的核心组件")

print(f"回答内容001: {response}")

# 4. Citation query engine: answers are annotated with their sources.
query_engine = CitationQueryEngine.from_args(index)

# 5. Example query against the citation engine.
response = query_engine.query("解释RAG架构的核心组件")
print(f"回答内容: {response}")


print("\n引用来源:")
for idx, source in enumerate(response.source_nodes):
    # ROBUSTNESS: `file_name` may be missing from node metadata, and
    # `score` may be None (citation source nodes do not always carry a
    # similarity score) — the original `['file_name']` / `:.2f` on None
    # raised KeyError / TypeError in those cases.
    file_name = source.node.metadata.get("file_name", "<unknown>")
    score = source.score if source.score is not None else 0.0
    print(f"{idx}. {file_name} (相似度: {score:.2f})")


