from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama import ChatOllama
from langgraph.graph import StateGraph, START, END
from torch.utils.checkpoint import checkpoint
from typing_extensions import TypedDict, Any
from langchain_community.tools.tavily_search import TavilySearchResults
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
import requests
import os
import re
from typing_extensions import TypedDict
from langgraph.graph.message import add_messages
from typing import Annotated, Literal
from langchain_core.messages import HumanMessage, AIMessage
from diffusers import DiffusionPipeline, StableDiffusionPipeline
import os
import base64
import torch
import json
import uuid
import outetts

# SECURITY NOTE(review): hardcoded API key committed to source — move it to an
# environment variable / secrets manager and rotate the exposed key.
os.environ["TAVILY_API_KEY"] = "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"
# Route Hugging Face downloads through the hf-mirror endpoint.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# In-memory checkpointer: the compiled graph uses it to keep per-thread_id
# conversation state (see builder.compile below).
memory = MemorySaver()

def generate_random_string_from_uuid():
    """Return a random 12-character lowercase-hex string derived from a UUID4.

    Used as a lightweight session/thread identifier.
    """
    # uuid4().hex is the 32-char hex form with dashes already removed.
    return uuid.uuid4().hex[:12]


def image_to_base64(image_path):
    """
    Read an image file and return its contents as a Base64-encoded string.

    :param image_path: path to the image file
    :return: Base64 string, or None when the file is missing or unreadable
             (errors are printed, not raised — deliberate best-effort contract)
    """
    try:
        with open(image_path, "rb") as image_file:
            encoded_string = base64.b64encode(image_file.read())
            # b64encode returns bytes; decode to str for JSON/markdown use.
            return encoded_string.decode("utf-8")
    except FileNotFoundError:
        print(f"文件未找到：{image_path}")
    except Exception as e:
        print(f"发生错误：{e}")
    # Previously the failure paths fell off the end of the function; make the
    # None result explicit so callers can see the failure mode in the code.
    return None

from outetts.interface import InterfaceHF

def get_audio(query: str):
    # NOTE(review): this docstring is surfaced to the LLM as the tool
    # description by create_react_agent — keep it descriptive of when to use it.
    """Convert the given description text to a speech file and return its address; the address is directly playable from the public internet."""
    # enum = outetts.Models("unsloth/Llama-OuteTTS-1.0-1B".split("/", 1)[1])  # VERSION_1_0_SIZE_1B
    # cfg = outetts.ModelConfig.auto_config(enum, outetts.Backend.HF)
    # Local model/tokenizer checkout, FP16 llama.cpp quantization.
    cfg = outetts.ModelConfig(
        model_path="./app/chains/outeTTS",
        tokenizer_path="./app/chains/outeTTS",
        # backend=outetts.Backend.LLAMACPP,
        quantization=outetts.LlamaCppQuantization.FP16
    )
    # NOTE(review): the TTS model is re-loaded on every call (unlike the image
    # pipeline, which is built once at module level) — consider hoisting.
    tts = outetts.Interface(cfg)
    print(os.getcwd())
    # Load the bundled default speaker profile.
    speaker = tts.load_default_speaker("EN-FEMALE-1-NEUTRAL")

    # Random file name so concurrent requests don't overwrite each other.
    fileName = f"{uuid.uuid4().hex}.wav"

    tts.generate(
        outetts.GenerationConfig(
            text=query,
            speaker=speaker,
            generation_type=outetts.GenerationType.CHUNKED,
            sampler_config=outetts.SamplerConfig(
                temperature=0.4
            )
        )
    ).save("./static/" + fileName)
    # URL assumes ./static is served on localhost:8080 — TODO confirm for
    # non-local deployments.
    return "音频地址为：" + "http://127.0.0.1:8080/static/" + fileName


def get_weather(location: str) -> str:
    """Fetch the current weather for the given city from weatherapi.com.

    :param location: city name (any language the API accepts)
    :return: human-readable string embedding the raw API JSON payload
    """
    # SECURITY NOTE(review): API key is hardcoded — move to an env var and rotate.
    api_url = "https://api.weatherapi.com/v1/current.json"
    # Pass the city as a query param so it is URL-encoded correctly (the old
    # f-string URL broke for names containing spaces or non-ASCII characters),
    # and bound the call with a timeout so the agent can never hang forever.
    response = requests.get(
        api_url,
        params={"key": "264efe8419d84d64ab371844251707", "q": location},
        timeout=10,
    )
    data = response.json()
    return f"{location} 天气信息：{data}"


def get_web(query: str):
    # NOTE: this docstring is the tool description create_react_agent gives the LLM.
    """Search the web via Tavily for the given question and return the results."""
    searcher = TavilySearchResults()
    return searcher.run(query)

# Load the Stable Diffusion pipeline once at import time from a local checkout,
# in half precision, and pin it to the GPU (shared by every get_photo call).
# NOTE(review): `allow_pickle=True` — verify this is an accepted from_pretrained
# kwarg for this diffusers version; only safe because the checkpoint is local.
pipe = StableDiffusionPipeline.from_pretrained("./app/chains/miniSDdiffusers/", torch_dtype=torch.float16,
                                               allow_pickle=True)
pipe = pipe.to("cuda")
def get_photo(query: str):
    # NOTE(review): this docstring is the tool description create_react_agent
    # passes to the LLM; it tells the model the result is markdown-renderable.
    """Generate an image from an English description and return the image address; the address can be rendered for the user directly via markdown syntax."""
    print(os.getcwd())
    # pipe = StableDiffusionPipeline.from_pretrained("./miniSDdiffusers/", torch_dtype=torch.float16, allow_pickle=True)

    prompt = query
    # Uses the module-level `pipe` built at import time; 256x256 keeps
    # generation fast on the mini SD checkpoint.
    image = pipe(prompt, width=256, height=256).images[0]

    # Random file name so concurrent generations don't collide.
    fileName = f"{uuid.uuid4().hex}.png"
    image.save("./static/" + fileName)
    # base64_string = image_to_base64("../../static/image.png")

    # URL assumes ./static is served on localhost:8080 — TODO confirm.
    return "图片地址为：" + "http://127.0.0.1:8080/static/" + fileName


# Locally served Ollama chat model, used both as the classifier in getChat
# and as the reasoning model inside the ReAct agent.
llm = ChatOllama(
    model="qwen3:4b",
    base_url="http://127.0.0.1:11434",
    temperature=1.0,
)

# Prebuilt ReAct agent over the four tools; each tool's docstring is what
# the LLM sees as its description.
agent = create_react_agent(
    model=llm,
    tools=[get_weather, get_web, get_photo,get_audio],
)


class MyState(TypedDict):
    """Shared graph state flowing through the chat -> removeTag -> answer nodes."""
    # Raw user question.
    input: str
    # Final answer; NOTE(review): getAnswer actually stores the agent's last
    # message object here, not a plain str — reconcile with this annotation.
    output: str
    # Tool category predicted by getChat ("weather"/"web"/"image"/"audio"/"default").
    type: str
    # Conversation history; the add_messages reducer appends rather than replaces.
    messages: Annotated[list, add_messages]


def getChat(state: MyState):
    """Classify the user's question into a tool category and store it in state["type"].

    The raw model reply (possibly including <think> tags) is stored verbatim;
    removeTag cleans it up in the next node.
    """
    prompt = f"""
    根据下面提供的工具列表对问题进行分类，类别:
    "weather":"用于获取指定城市的天气信息"
    "web":"用于Web在线搜索内容"
    "image":"根据英文描述内容绘制图片"
    "audio":"根据描述内容生成语音"
    "default":"其它"

    问题：{state["input"]}

    只返回类型名称，其它多余信息全都不要
        """
    classification = llm.invoke(prompt)
    state["type"] = classification.content
    return state


def removeTag(state: MyState):
    """Strip the model's <think>...</think> reasoning block and every newline
    from state['type'], leaving a bare category name."""
    # DOTALL lets the reasoning block span multiple lines.
    cleaned = re.sub(r'<think>.*?</think>', '', state['type'], flags=re.DOTALL)
    state['type'] = cleaned.replace('\n', '')
    return state


def getAnswer(state: MyState):
    """Run the ReAct agent over the conversation and record its final reply."""
    result = agent.invoke({"messages": state['messages']})
    final_message = result['messages'][-1]
    state['output'] = final_message
    # The add_messages reducer on MyState.messages appends this AI turn
    # to the stored history rather than replacing it.
    state['messages'] = [
        {"role": "ai", "content": final_message.content}]
    return state


# Build the state graph: classify -> strip <think> tags -> answer via the agent.
builder = StateGraph(MyState)
# Register the nodes
builder.add_node('chat', getChat)
builder.add_node('removeTag', removeTag)
builder.add_node('answer', getAnswer)
# Wire the linear pipeline
builder.add_edge(START, 'chat')
builder.add_edge("chat", 'removeTag')
builder.add_edge("removeTag", "answer")
builder.add_edge("answer", END)
# Compile with the in-memory checkpointer so thread_id scopes conversations
graph = builder.compile(checkpointer=memory)


# # 执行图
# result_state = graph.invoke({"input": "沈阳和天津的温度都是多少"})
#
# # 打印结果
# print(result_state)


def printResponse(input: Any, session: str):
    """Stream the graph's token chunks to stdout, inserting a newline
    whenever the producing node changes (but not before the first token)."""
    previous_node = ""
    initial_state = {"messages": [{"role": "user", "content": input}], "input": input, "type": "", "output": ""}

    for chunk in graph.stream(
            initial_state,
            config={"configurable": {"thread_id": session}}, subgraphs=True,
            stream_mode=["messages"]):
        message, metadata = chunk[2][0], chunk[2][1]
        current_node = metadata['langgraph_node']
        # Only a node boundary after the first token triggers a line break.
        separator = "\n" if previous_node and current_node != previous_node else ""
        print(separator + message.content, end="", flush=True)
        previous_node = current_node


# printResponse("你好，我叫Bob，我想问今天上海的天气如何？")


# printResponse("你知道我叫什么吗？")
# printResponse("画一只柯基", generate_random_string_from_uuid())


def getResponse(input: str, session: str):
    """Stream a graph run as newline-delimited JSON events.

    Yields one JSON line per streamed message chunk, e.g.
    {"node": "...", "content": "..."}, skipping output from the internal
    'tools' and 'chat' nodes so clients only see user-facing text.
    """
    initial_state = {"messages": [{"role": "user", "content": input}], "input": input, "type": "", "output": ""}
    run_config = {"configurable": {"thread_id": session}}

    for chunk in graph.stream(initial_state, config=run_config, subgraphs=True,
                              stream_mode=["messages"]):
        message, metadata = chunk[2][0], chunk[2][1]
        node = metadata['langgraph_node']

        # Hide intermediate tool calls and the classifier node from clients.
        if node in ('tools', 'chat'):
            continue

        payload = {
            "node": node,
            "content": message.content,
        }
        yield json.dumps(payload, ensure_ascii=False) + "\n"
