
from typing import Any, Dict, List, Tuple

from llama_index.core.chat_engine import CondenseQuestionChatEngine
from llama_index.core.indices.list.base import SummaryIndex
from llama_index.core.indices.struct_store.sql import (
    SQLContextContainerBuilder,
    SQLStructStoreIndex,
)
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer, \
    DocumentSummaryIndex
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding

from llama_index.core.indices.property_graph.utils import default_parse_triplets_fn

import os

# NOTE(security): the API key was hard-coded here. Prefer the environment
# variable; the old literal is kept only as a fallback so behavior is unchanged.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ.get(
        "ZHIPUAI_API_KEY", "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5"
    ),
    # With the `embedding-3` class of models, you can specify the size
    # of the embeddings you want returned, e.g. dimensions=1024.
)
# BUG FIX: the original assigned `Settings.embed_mode` (a typo). llama-index's
# global settings attribute is `embed_model`; the typo was silently ignored,
# so the custom embedding model was never actually registered.
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

import os

# NOTE(security): avoid committing API keys; read from the environment first
# and fall back to the original literal so existing behavior is preserved.
llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.environ.get("DEEPSEEK_API_KEY", "sk-605e60a1301040759a821b6b677556fb"),
)
# Register the LLM globally so every engine built below uses it by default.
Settings.llm = llm


from llama_index.core.indices.struct_store.json_query import JSONQueryEngine
from llama_index.core.indices.struct_store.pandas import (
    GPTPandasIndex,
    PandasIndex,
)
from llama_index.core.indices.struct_store.sql import (
    GPTSQLStructStoreIndex,
    SQLContextContainerBuilder,
    SQLStructStoreIndex,
)
from llama_index.core.indices.struct_store.sql_query import (
    GPTNLStructStoreQueryEngine,
    GPTSQLStructStoreQueryEngine,
    NLSQLTableQueryEngine,
    NLStructStoreQueryEngine,
    SQLStructStoreQueryEngine,
    SQLTableRetrieverQueryEngine,
)

def infer_schema(json_data: Dict[str, Any]) -> Dict[str, Any]:
    """Infer a JSON-Schema-like description from a sample JSON object.

    Args:
        json_data: A decoded JSON object (dict). Values may be strings,
            booleans, numbers, lists, nested dicts, or None.

    Returns:
        A dict with ``"type": "object"``, a ``"properties"`` mapping giving
        each key's inferred type, and a ``"required"`` list containing every
        key seen in the sample (all keys are treated as required).
    """
    schema: Dict[str, Any] = {"type": "object", "properties": {}, "required": []}

    for key, value in json_data.items():
        prop: Dict[str, Any] = {}
        # bool is checked before int/float because bool is a subclass of int;
        # otherwise True/False would be classified as "number".
        if isinstance(value, str):
            prop["type"] = "string"
        elif isinstance(value, bool):
            prop["type"] = "boolean"
        elif isinstance(value, (int, float)):
            prop["type"] = "number"
        elif isinstance(value, list):
            prop["type"] = "array"
            # Sample-based inference: item schema comes from the first
            # element only, and only when that element is an object.
            if value and isinstance(value[0], dict):
                prop["items"] = infer_schema(value[0])
        elif isinstance(value, dict):
            prop.update(infer_schema(value))
        elif value is None:
            # FIX: the original left the property empty ({}) for null values,
            # producing a malformed schema entry; JSON null maps to "null".
            prop["type"] = "null"

        schema["properties"][key] = prop
        schema["required"].append(key)

    return schema

# Sample JSON payload to index and query.
json_data = {"name": "John", "age": 30, "city": "New York"}

# Derive a schema from the sample and show the result.
json_schema = infer_schema(json_data)
print(json_schema)

from llama_index.core.query_engine import JSONalyzeQueryEngine  # imported for experimentation; not used below

# Build a natural-language query engine over the JSON payload and its schema.
query_engine = JSONQueryEngine(json_data, json_schema)

# Ask about the "age" field.
response = query_engine.query("What is the person's age?")
print(response)  # expected: 30

# Deliberately typo'd question ("ciyt" instead of "city") to see how the
# engine copes with misspellings.
response = query_engine.query("ciyt?")
print(response)


# Wrap the query engine in a chat engine that condenses each follow-up
# question into a standalone query before dispatching it.
condenseQuestionChatEngine = CondenseQuestionChatEngine.from_defaults(query_engine)

# Same two questions as above, now through the conversational interface.
rs = condenseQuestionChatEngine.chat("What is the person's age?")
print(rs)

rs = condenseQuestionChatEngine.chat("ciyt?")
print(rs)


