# Standard library
from email.policy import strict  # NOTE(review): appears unused — likely an IDE auto-import; confirm before removing
from typing import List, Optional

# Third-party
import nest_asyncio
from exceptiongroup import catch  # NOTE(review): appears unused — likely an IDE auto-import; confirm before removing
from llama_index.core import SimpleDirectoryReader, Settings, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.tools import FunctionTool
from llama_index.core.vector_stores import MetadataFilters, FilterCondition
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama

# from urllib import response

nest_asyncio.apply()

# 该示例体现了RAG可以使用自定义的function，而不是通过自己的大模型去跑逻辑
# 本例中，自定义add和mystery两个function，大模型就会完全按照你给出的function来执行业务

# Test example 1: register plain Python functions as tools
def add(x:int,y:int) -> int:
    """add 两数相加"""
    return x+y

# def mystery(x:int,y:int) -> int:
#     """mystery 方法是两数加法结果的平方"""
#     return (x+y) * (x+y)

# add_tool = FunctionTool.from_defaults(fn=add)
# mystery_tool = FunctionTool.from_defaults(fn=mystery)

# llm = Ollama(base_url="http://192.168.0.200:11434", model="mistral") # deepseek-r1:8b不支持
# response = llm.predict_and_call(
#     [add_tool,mystery_tool],
#     "Tell me the output of the add function on 2 and 9",
#     verbose=True
# ) # 默认带print
# print(str(response))

# Test example 2: querying a vector index with metadata filters
# documents = SimpleDirectoryReader(input_files=["./homework/data/Java面试题.pdf"]).load_data()
# splitter = SentenceSplitter(chunk_size=1024)
# nodes = splitter.get_nodes_from_documents(documents)
# print(nodes[7].get_content(metadata_mode="all"))

# Settings.embed_model = HuggingFaceEmbedding(model_name="E:/huggingface_cache/BAAI/bge-small-zh-v1.5")
# Settings.llm = Ollama(base_url="http://192.168.0.200:11434", model="deepseek-r1:8b")

# index = VectorStoreIndex(nodes=nodes)
# query_engine = index.as_query_engine(
#         similarity_top_k=2,
#         filters=MetadataFilters.from_dicts(
#             [
#                 {"key":"page_label","value":"2"}
#             ]
#         )
#     )
# response = query_engine.query(
#     "第二页主要讲的是哪个类型的题目"
# )
# print(str(response))

# Worked (positive) example: vector-query tool with page filtering
# documents = SimpleDirectoryReader(input_files=["./homework/data/Java面试题.pdf"]).load_data()
# splitter = SentenceSplitter(chunk_size=1024)
# nodes = splitter.get_nodes_from_documents(documents)

# # Settings.embed_model = HuggingFaceEmbedding(model_name="E:/huggingface_cache/BAAI/bge-small-zh-v1.5")
# # Settings.llm = Ollama(base_url="http://192.168.0.200:11434", model="mistral")

# def vector_query(query: str,page_numbers: List[str]) -> str:
#     """
#     演示通过向量索引查询
#     query(str): 查询的内容
#     page_numbers:(List[str])：过滤页数，如果查整个文档的内容，空着就行，不然就填具体的页数
#     """

#     metadata_dicts = [
#         {"key":"page_label","value":p} for p in page_numbers
#     ]

#     index = VectorStoreIndex(nodes=nodes)
#     query_engine = index.as_query_engine(
#             similarity_top_k=2, # 决定了模型检索时返回的相似结果数量，越小答案越准确，越大答案越发散
#             filters=MetadataFilters.from_dicts(
#                 metadata_dicts,
#                 condition=FilterCondition.OR
#             )
#         )
#     response = query_engine.query(query)
#     return response

# vector_query_tool = FunctionTool.from_defaults(fn=vector_query)
# add_tool = FunctionTool.from_defaults(fn=add)

# llm = Ollama(base_url="http://192.168.0.200:11434", model="mistral")
# response = llm.predict_and_call(
#     tools=[vector_query_tool,add_tool],
#     user_msg="2+22等于几"
#     # "告诉我第四页主要讲了什么内容",
# ) # verbose=True默认带print
# print(str(response))

# Official example (adapted)

# from util.weather import get_request
import requests

def get_request(url, params=None, headers=None):
    response = requests.get(url, params=params, headers=headers)
    if response.status_code == 200:
        return response.json()  # 返回JSON格式的响应数据
    else:
        return None  # 处理错误情况

class Song:
    name:str
    artist:str

def generate_song(name: str, artist: str) -> Song:
    """Generates a song with provided name and artist."""
    return {"name": name, "artist": artist}

def weather(code: int,extensions: str) -> any:
    """获取code对应的天气情况，其中extensions的枚举为base/all，base表示当天，all表示未来三天"""
    url = "https://restapi.amap.com/v3/weather/weatherInfo"
    params = {"key": "3d95d4f97a3e2066f4089dea9c4eb1b0", "city": code, "extensions": extensions} # 实时
    data = get_request(url, params=params, headers={})
    return data

def combine_task(code: int,extensions: str) -> any:
    """获取code对应的天气情况，其中extensions的枚举为base/all，base表示当天，all表示未来三天"""
    w = weather(code,extensions)
    a = add(len(w['forecasts'][0]['casts']),1)
    return {"weather":w,"add_res":a}


generate_song_tool = FunctionTool.from_defaults(fn=generate_song,name="generate_song")
add_tool = FunctionTool.from_defaults(fn=add,name="add")
weather_tool = FunctionTool.from_defaults(fn=weather,name="weather")
combine_task_tool = FunctionTool.from_defaults(fn=combine_task,name="combine_task")

llm = Ollama(base_url="http://192.168.0.200:11434", model="mistral")
# ret1 = llm.predict_and_call(
#     [weather_tool,add_tool],
#     "获取code=320100的未来的天气",
#     verbose=True
# )
# print(str(ret1))

# ret2 = llm.predict_and_call(
#     [add_tool],
#     "1+2=?",
#     verbose=True
# )
# print(str(ret2))

ret3 = llm.predict_and_call(
    [combine_task_tool],
    "获取code=320100的未来的天气",
    verbose=True
)
print(str(ret3))