import json
import os
import uuid

from langchain import requests, hub
from langchain.agents import initialize_agent, AgentType, create_tool_calling_agent, AgentExecutor
from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import HumanMessage
from langgraph.prebuilt import create_react_agent
from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader, PyMuPDFLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core import memory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, StringPromptTemplate, PromptTemplate
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_core.tools import Tool, BaseTool, create_retriever_tool
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.tools import BaseTool
from langgraph.checkpoint.memory import MemorySaver
from modelscope.models.cv.image_view_transform.ldm.util_diffusion import checkpoint
from pydantic import BaseModel, Field, PrivateAttr
from typing import Optional, Type, List
import requests

# LangSmith tracing configuration.
# NOTE(security): the LangSmith and DashScope keys were hard-coded in source.
# They are now read from the environment first, falling back to the original
# literals so existing setups keep working — rotate these keys and delete the
# fallbacks before committing this file anywhere public.
os.environ['LANGCHAIN_TRACING_V2'] = 'true'
os.environ['LANGCHAIN_PROJECT'] = 'LLMDEMO'
os.environ['LANGCHAIN_API_KEY'] = os.environ.get(
    'LANGCHAIN_API_KEY',
    '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',
)

# Chat model client pointed at DashScope's OpenAI-compatible endpoint.
# pip install redis
llm = ChatOpenAI(
    api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-a3f7718fb81f43b2915f0a6483b6661b"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    # Swap the model name as needed; model list:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    model="llama-4-scout-17b-16e-instruct",
    # other params...
)
# pip install pypdf
# pip install langchain-chroma
# pip install chroma-hnswlib==0.7.5 chromadb==0.5.4  (pinned to avoid version-incompatibility errors)
# pip install dashscope


# Input schema for the ticket-creation tool.
class TicketInput(BaseModel):
    """Arguments accepted by the `create_ticket` tool."""
    # Identifier of the customer filing the ticket (required).
    customer_id: str = Field(..., description="客户ID")
    # Free-text description of the problem (required).
    issue: str = Field(..., description="问题描述")
    # Ticket priority; defaults to "medium" when the model omits it.
    priority: Optional[str] = Field("medium", description="优先级: low, medium, high")


class CreateTicketTool(BaseTool):
    """Tool that files a support ticket for a customer via the local ticket API."""

    name: str = "create_ticket"
    description: str = "为客户问题创建支持工单"
    args_schema: Type[BaseModel] = TicketInput

    def _run(self, customer_id: str, issue: str, priority: str = "medium") -> str:
        """POST the ticket to the ticket service and report the outcome.

        Returns a human-readable success message containing the new ticket id,
        or a failure message on any error — this tool never raises, so the
        agent loop always receives a string observation.
        """
        try:
            response = requests.post(
                "http://localhost:8000/api/tickets",
                json={
                    "customer_id": customer_id,
                    "issue": issue,
                    "priority": priority
                },
                timeout=5
            )
            # Fix: fail fast on non-2xx responses. The original parsed the
            # body unconditionally, so an error response that happened to
            # contain "ticket_id" would be reported as success.
            response.raise_for_status()
            return f"工单创建成功: {response.json()['ticket_id']}"
        except Exception as e:
            # Broad catch is deliberate: any failure (connection, HTTP status,
            # bad JSON, missing key) is surfaced to the LLM as an observation
            # string instead of crashing the agent.
            return f"工单创建失败: {str(e)}"

# Load the persisted Chroma vector store from disk and expose its
# similarity search as a composable runnable.
print("加载本地向量数据库...")
_embedder = DashScopeEmbeddings()
vectordb = Chroma(
    persist_directory="chroma_db",
    embedding_function=_embedder,
)
# Bind k=1 so each query returns only the single closest document.
retriever = RunnableLambda(vectordb.similarity_search).bind(k=1)
print("加载本地向量数据库完成...")
# Refund tool definition.
class RefundTool(BaseTool):
    """Tool that initiates a (mock) refund from a JSON payload.

    Expects `json_str` to be a JSON object containing at least "order_id";
    "reason" is described to the model but not read here.
    """

    name: str = "process_refund"
    description: str = "处理退款请求，需要order_id和reason"

    def _run(self, json_str: str) -> str:
        """Parse the tool input and simulate the refund API call.

        Returns a confirmation string on success, or a fixed error message
        when the input is not valid JSON or lacks "order_id" — never raises.
        """
        try:
            print(json_str)
            data = json.loads(json_str)
            print(f"DEBUG: 调用退款工具，参数={data}")  # debug log
            # The real refund API call would go here.
            print("DEBUG: 模拟调用退款接口")
            return f"已为订单{data['order_id']}发起退款"
        except (json.JSONDecodeError, KeyError, TypeError):
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt,
            # SystemExit and genuine programming errors are no longer
            # silently swallowed. TypeError covers non-string/non-dict input.
            return "参数格式错误，请提供订单编号和原因"

# Wrap the retriever as an agent tool so the model can query the knowledge base.
tool = create_retriever_tool(retriever=retriever, name="knowledge_base", description="用于检索用户提出的问题，并基于检索到的文档内容进行回复")
# Ticket tool — NOTE(review): CreateTicketTool and RefundTool are defined above
# but never registered here; only the knowledge-base retriever is exposed to
# the agent. Confirm that omission is intentional.
tools = [tool]
# System instructions, injected into the template below via .partial(ins=...).
ins = """
你是一个被设计通过查询文档来回答用户问题的助手，请遵循以下规则：
你可能不查询文档也知道答案，但是请务必查询文档，结合文档查询内容回答
如果文档查询不到，请勿编造答案，直接回答：我无法回答您的问题。
"""
# Classic text ReAct template (Thought/Action/Observation format).
# NOTE(review): this template carries {tools}/{tool_names}/{agent_scratchpad}
# placeholders from the legacy initialize_agent ReAct loop, but it is later
# passed to langgraph's create_react_agent, which is a tool-calling agent and
# does not fill those variables the same way — verify the prompt actually
# renders as intended at run time.
base_prompt = """
{ins}
TOOLS:
------------
你可以使用的工具列表如下:
{tools}
请按格式响应：
Question: {input}
Thought: 思考过程
Action: 工具名（从 [{tool_names}] 中选择）
Action Input: 工具输入
Observation: 工具返回结果
Final Answer: 最终答案
开始!
会话历史记录
{chat_history}

Question: {input}
当前状态：
{agent_scratchpad}
"""
prompt = PromptTemplate.from_template(base_prompt)
# Pre-fill only the instruction slot; the remaining variables are supplied later.
base = prompt.partial(ins=ins)
print(base)
# In-memory checkpointer giving the agent per-thread conversation state.
# NOTE: this rebinds the module-level name `memory`, shadowing the
# `langchain_core.memory` import at the top of the file (pre-existing quirk).
memory = MemorySaver()

# memory = ConversationBufferMemory(memory_key="chat_history")

tool_agent = create_react_agent(model=llm, tools=tools, prompt=base, checkpointer=memory)
# Fix: use a string thread id. Checkpoint configs are keyed/serialized by the
# checkpointer, and LangGraph documents thread_id as a string; a raw
# uuid.UUID object is not guaranteed to round-trip.
thread_id = str(uuid.uuid4())
config = {"configurable": {"thread_id": thread_id}}

# Tell the AI that our name is Bob, and ask it to use a tool to confirm
# that it's capable of working like an agent.
input_message = HumanMessage(content="hi! I'm bob. What is my age?")

# stream_mode="values" yields the full state after each step; print only the
# newest message from each snapshot.
for event in tool_agent.stream({"messages": [input_message]}, config, stream_mode="values"):
    event["messages"][-1].pretty_print()
# agent = AgentExecutor(agent=tool_agent,tools=tools,memory=memory,verbose=True,handle_parsing_errors="没有从知识库检索到相似内容")
# print(agent.invoke({"input":"什么是Flink","chat_history":[]}))
