


from typing import Any, Dict, List, Tuple

from llama_index.core.indices.list.base import SummaryIndex
from llama_index.core.indices.struct_store.sql import (
    SQLContextContainerBuilder,
    SQLStructStoreIndex,
)
from llama_index.core.indices.struct_store.sql_query import (
    NLStructStoreQueryEngine,
)
from llama_index.core.schema import (
    BaseNode,
    Document,
    NodeRelationship,
    QueryBundle,
    RelatedNodeInfo,
    TextNode,
)
from llama_index.core.utilities.sql_wrapper import SQLDatabase
from sqlalchemy import (
    Column,
    Integer,
    MetaData,
    String,
    Table,
    create_engine,
    delete,
    select,
)
from typing import List

from llama_index.core.agent.workflow import  FunctionAgent
from llama_index.core.indices.common.struct_store.sql import SQLStructDatapointExtractor
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.schema import  TextNode
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer, \
    DocumentSummaryIndex
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document
from pydantic import BaseModel
from llama_index.core.indices.property_graph.base import PropertyGraphIndex
from llama_index.core.indices.property_graph.retriever import PGRetriever
from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever
from llama_index.core.indices.property_graph.sub_retrievers.custom import (
    CustomPGRetriever,
    CUSTOM_RETRIEVE_TYPE,
)
from llama_index.core.indices.property_graph.sub_retrievers.cypher_template import (
    CypherTemplateRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.llm_synonym import (
    LLMSynonymRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.text_to_cypher import (
    TextToCypherRetriever,
)
from llama_index.core.indices.property_graph.sub_retrievers.vector import (
    VectorContextRetriever,
)
from llama_index.core.indices.property_graph.transformations.implicit import (
    ImplicitPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.schema_llm import (
    SchemaLLMPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.simple_llm import (
    SimpleLLMPathExtractor,
)
from llama_index.core.indices.property_graph.transformations.dynamic_llm import (
    DynamicLLMPathExtractor,
)
from llama_index.core.indices.property_graph.utils import default_parse_triplets_fn

import os

# NOTE(review): API keys were hard-coded in source — a security risk once this
# file is shared or committed. They are now read from the environment first,
# with the original literals kept as fallbacks so existing behavior is
# unchanged. TODO: remove the fallback literals and rotate these keys.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ.get(
        "ZHIPUAI_API_KEY",
        "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5",
    ),
    # With the `embedding-3` class of models, you can specify the size
    # of the embeddings you want returned, e.g.:
    # dimensions=1024
)
# Register the embedding model globally so all indices/retrievers use it.
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

# DeepSeek chat model used as the global LLM for query synthesis.
llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.environ.get(
        "DEEPSEEK_API_KEY",
        "sk-605e60a1301040759a821b6b677556fb",
    ),
)
Settings.llm = llm


from llama_index.core.indices.struct_store.json_query import JSONQueryEngine
from llama_index.core.indices.struct_store.pandas import (
    GPTPandasIndex,
    PandasIndex,
)
from llama_index.core.indices.struct_store.sql import (
    GPTSQLStructStoreIndex,
    SQLContextContainerBuilder,
    SQLStructStoreIndex,
)
from llama_index.core.indices.struct_store.sql_query import (
    GPTNLStructStoreQueryEngine,
    GPTSQLStructStoreQueryEngine,
    NLSQLTableQueryEngine,
    NLStructStoreQueryEngine,
    SQLStructStoreQueryEngine,
    SQLTableRetrieverQueryEngine,
)

def infer_schema(json_data: Dict[str, Any]) -> Dict[str, Any]:
    """Infer a JSON-Schema-like structure from a sample JSON object.

    Walks the top-level keys of ``json_data`` and maps each Python value to
    the corresponding JSON Schema type. Nested dicts are recursed into; for
    arrays whose first element is a dict, an ``items`` sub-schema is inferred
    from that first element only (later elements are not sampled).

    Args:
        json_data: A decoded JSON object (``dict``) to sample.

    Returns:
        A ``{"type": "object", "properties": ..., "required": [...]}``
        mapping. Every key present in ``json_data`` is listed as required.
    """
    schema: Dict[str, Any] = {"type": "object", "properties": {}, "required": []}

    for key, value in json_data.items():
        prop: Dict[str, Any] = {}
        # bool must be tested before int/float: bool is a subclass of int,
        # so isinstance(True, int) is True.
        if isinstance(value, bool):
            prop["type"] = "boolean"
        elif isinstance(value, str):
            prop["type"] = "string"
        elif isinstance(value, (int, float)):
            prop["type"] = "number"
        elif isinstance(value, list):
            prop["type"] = "array"
            # Item schema is sampled from the first element when it is a dict.
            if value and isinstance(value[0], dict):
                prop["items"] = infer_schema(value[0])
        elif isinstance(value, dict):
            prop.update(infer_schema(value))
        elif value is None:
            # Fix: None previously produced an empty property schema with no
            # "type" at all; map it to the JSON Schema "null" type instead.
            prop["type"] = "null"

        schema["properties"][key] = prop
        schema["required"].append(key)

    return schema

# Sample JSON payload to index and query (translated from "加载 JSON 数据").
json_data = {"name": "John", "age": 30, "city": "New York"}

json_schema=infer_schema(json_data)
print(json_schema)

# NOTE(review): JSONalyzeQueryEngine is imported here but never used below —
# the engine actually constructed is JSONQueryEngine (imported further up).
# Consider removing this import or switching to it deliberately.
from llama_index.core.query_engine import JSONalyzeQueryEngine

# Build the query engine over the raw JSON value and the inferred schema.
# NOTE(review): arguments are passed positionally; presumably they map to
# (json_value, json_schema) — confirm against the JSONQueryEngine signature
# and prefer keyword arguments for clarity.
query_engine = JSONQueryEngine(json_data,json_schema)

# Run a natural-language query; this calls the globally configured LLM
# (Settings.llm) to translate the question into a JSONPath over json_data.
response = query_engine.query("What is the person's age?")

print(response)  # expected output: 30
