from typing import List

from llama_index.core.agent.workflow import  FunctionAgent
from llama_index.core.indices.common.struct_store.sql import SQLStructDatapointExtractor
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.schema import  TextNode
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer, \
    DocumentSummaryIndex
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document
from pydantic import BaseModel
from llama_index.core.indices.property_graph.base import PropertyGraphIndex
from llama_index.core.indices.property_graph.retriever import PGRetriever
from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever
from llama_index.core.indices.property_graph.sub_retrievers.custom import (
    CustomPGRetriever,
    CUSTOM_RETRIEVE_TYPE,
)
from llama_index.core.indices.property_graph.sub_retrievers.cypher_template import (
    CypherTemplateRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.llm_synonym import (
    LLMSynonymRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.text_to_cypher import (
    TextToCypherRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.vector import (
    VectorContextRetriever,
)
from llama_index.core.indices.property_graph.transformations.implicit import (
    ImplicitPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.schema_llm import (
    SchemaLLMPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.simple_llm import (
    SimpleLLMPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.dynamic_llm import (
    DynamicLLMPathExtractor,
)
from llama_index.core.indices.property_graph.utils import default_parse_triplets_fn

import os

# SECURITY: never commit API keys in source — a committed key is a leaked key
# and must be rotated. Read credentials from the environment instead.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.getenv("ZHIPUAI_API_KEY", ""),
    # With the `embedding-3` class of models, you can specify the size
    # of the embeddings you want returned, e.g. dimensions=1024.
)
# Register the embedding model globally so every index/retriever uses it.
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

# DeepSeek chat model as the global LLM; key comes from the environment too.
llm = DeepSeek(model="deepseek-chat", api_key=os.getenv("DEEPSEEK_API_KEY", ""))
Settings.llm = llm
from llama_index.core.prompts.base import (
    BasePromptTemplate,
    ChatPromptTemplate,
    LangchainPromptTemplate,
    Prompt,
    PromptTemplate,
    PromptType,
    SelectorPromptTemplate,
)
from llama_index.core.prompts.rich import RichPromptTemplate
from llama_index.core.prompts.display_utils import display_prompt_dict

# Prompt that instructs the model to answer only from the delimited context
# and to state explicitly when information is missing.
custom_prompt = PromptTemplate(
    """请根据标记的上下文回答，缺失信息时声明：
    --------
    {formatted_context}
    --------
    问题：{query}
    回答："""
)
# Both placeholders must be supplied: PromptTemplate.format delegates to
# str.format, which raises KeyError for any missing template variable
# (the original call omitted `query` and would crash).
rs = custom_prompt.format(formatted_context="ok", query="示例问题")
print(rs)



from llama_index.core import PromptTemplate
from llama_index.core.prompts import SelectorPromptTemplate

# Template for detailed answers (with examples).
detailed_template = PromptTemplate("""
请基于以下上下文提供详细解答（含示例）：
{context_str}
问题：{query_str}
""")

# Template for terse, conclusion-only answers.
concise_template = PromptTemplate("""
直接回答核心结论：
{context_str}
问题：{query_str}
""")

# Selector: the first conditional whose predicate returns True wins, otherwise
# the default template is used. The predicate receives the LLM instance (not
# kwargs); this always-True predicate picks concise_template whenever an LLM
# is passed to format()/select().
selector_prompt = SelectorPromptTemplate(
    default_template=detailed_template,
    conditionals=[
        (lambda llm: True, concise_template),
    ],
)

# SelectorPromptTemplate has no render() method (the original call raised
# AttributeError); format() resolves the selected template and fills in its
# variables. Without an llm argument the default template is selected.
print(selector_prompt.format(context_str="（示例上下文）", query_str="（示例问题）"))


def format_bullets(text):
    """Return *text* with every line prefixed by a '• ' bullet marker."""
    lines = text.split("\n")
    bulleted = [f"• {line}" for line in lines]
    return "\n".join(bulleted)

def _bullet_context_fn(**kwargs):
    """Adapter for RichPromptTemplate function mappings.

    llama_index invokes each function mapping as ``fn(**all_kwargs)`` (all
    template variables as keyword arguments), so a single-positional-arg
    function like ``format_bullets`` would raise TypeError. Unpack the
    relevant variable here and delegate.
    """
    return format_bullets(kwargs["bullet_context"])

# Jinja-style template; {{ bullet_context }} is produced by the mapping above
# from the caller-supplied `bullet_context` value.
template = RichPromptTemplate(
    "Formatted Context:\n{{ bullet_context }}\nQuestion: {{ query }}",
    function_mappings={"bullet_context": _bullet_context_fn},
)


