import json
import os
import string
from typing import Dict, List, TypedDict

from docx import Document
from docx.shared import Pt
from langchain_community.document_loaders.word_document import UnstructuredWordDocumentLoader
from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai.chat_models.base import BaseChatOpenAI
from langgraph.constants import END, START
from langgraph.graph import StateGraph


class TechBidState(TypedDict):
    """Shared pipeline state passed between the LangGraph nodes."""
    user_query: str               # Original user requirement (e.g. "government-cloud security architecture")
    web_results: List[Dict]       # Web retrieval results [{url, content, relevance}]
    technical_content: str        # Generated technical-proposal text
    word_path: str                # Output path of the generated Word document
    references: List[str]         # URLs of cited sources
# LLM client used by every node.
# SECURITY: the API key was previously committed in plain text — it must be
# rotated and supplied via the environment, never hard-coded in source.
llm = BaseChatOpenAI(
    model='deepseek-chat',
    openai_api_key=os.getenv('DEEPSEEK_API_KEY', ''),
    openai_api_base='https://api.deepseek.com',
    max_tokens=1024,
)


def word_get_pageCount_node(file: str) -> int:
    """Load a Word document in paged mode, print each page chunk, and return
    the number of page chunks.

    Args:
        file: Path to the .docx file to inspect.

    Returns:
        Number of page chunks produced by the loader.
    """
    # NOTE: the annotation was previously `string` — the stdlib *module*,
    # not a type; the correct annotation is `str`.
    loader = UnstructuredWordDocumentLoader(file, mode="paged")
    docs = loader.load_and_split()
    for doc in docs:
        print(doc.page_content)
    return len(docs)


def web_retriever_node(state: TechBidState):
    """Retrieve web pages for the user query, filter them for relevance with
    the LLM, and extract the technical core into ``web_results``.

    Reads ``user_query`` from the state; returns ``{"web_results": [...]}``
    where each entry is ``{"url": str, "tech_data": str}``.
    """
    # 1. Web search. NOTE(review): TavilySearchAPIWrapper exposes `results()`
    # (not `run()`), and has no `domain` constructor kwarg — domain
    # restriction must be expressed in the query itself if needed.
    search_tool = TavilySearchAPIWrapper()
    raw_results = search_tool.results(
        state["user_query"],
        max_results=5,
        include_raw_content=True,
    )

    # 2. Relevance filter (LLM-judged). The JSON schema braces must be
    # *literal* text, so the prompt is built by concatenation — in the
    # original f-string, {"relevant": bool, ...} was parsed as a replacement
    # field with a format spec and raised at runtime.
    filtered = []
    for res in raw_results[:10]:  # only process the first 10 hits
        prompt = (
            "评估网页与标书撰写的相关性：\n"
            f"需求：{state['user_query']}\n"
            f"网页内容：{res['content'][:1000]}...\n"
            '输出JSON：{"relevant": bool, "reason": str}'
        )
        rating_msg = llm.invoke(prompt, response_format={"type": "json_object"})
        # invoke() returns a message object, not a dict — parse its content.
        try:
            rating = json.loads(rating_msg.content)
        except (json.JSONDecodeError, TypeError):
            continue  # unparseable rating: skip the page rather than crash
        if rating.get("relevant"):
            filtered.append(res)

    # 3. Key-information extraction (avoid storing full pages).
    processed = []
    for res in filtered:
        prompt = f"提取以下文本中与技术方案相关的核心参数和标准，忽略广告/免责声明：{res['content'][:2000]}"
        tech_msg = llm.invoke(prompt)
        processed.append({
            "url": res["url"],
            "tech_data": tech_msg.content,  # plain text, not the raw message object
        })

    return {"web_results": processed}

def word_generator_node(state: TechBidState):
    """Generate the technical proposal with the LLM and lay it out as a Word
    document.

    Reads ``user_query`` and ``web_results`` from the state; returns the
    generated text (``technical_content``) and the saved file path
    (``word_path``).
    """
    # 1. Assemble the knowledge context from retrieved pages so the model can
    #    cite them. Empty string when no results were gathered, which matches
    #    the previous hard-coded "" behavior.
    knowledge_ctx = "\n".join(
        f"来源[{idx + 1}]：{res['tech_data']} (URL: {res['url']})"
        for idx, res in enumerate(state["web_results"])
    )

    # 2. Generate the proposal; stop on a code fence so markdown blocks do
    #    not leak into the Word body.
    prompt_template = ChatPromptTemplate.from_messages([
        ("system", "你是一名技术标书专家，根据权威资料编写方案，你编写的方案内容很详细, 关于本需求,你的 能力可以写2000字的方案，还可以图文并茂。"),
        ("user",  "需求：{query}\n参考资料：\n{ctx}")
    ])
    chain = prompt_template | llm.bind(stop=["```"])
    tech_text = chain.invoke({
        "query": state["user_query"],
        "ctx": knowledge_ctx,
    }).content

    # 3. Automated Word layout.
    doc = Document()
    doc.add_heading(f"技术方案：{state['user_query']}", level=0)
    para = doc.add_paragraph(tech_text)
    # NOTE(review): this mutates the shared paragraph *style*, so every
    # paragraph using that style gets 10.5pt — confirm this is intended.
    para.style.font.size = Pt(10.5)

    # Reference list on its own page.
    doc.add_page_break()
    doc.add_heading("参考文献", level=1)
    for res in state["web_results"]:
        doc.add_paragraph(f"• {res['url']}", style="ListBullet")

    # 4. Save and update state. (The dangling `doc.` statement that made the
    #    module unparseable is removed; the developer-specific absolute path
    #    is replaced with a portable one relative to the working directory.)
    output_path = os.path.abspath("bid.docx")
    doc.save(output_path)
    return {"technical_content": tech_text, "word_path": output_path}

def should_continue(state):
    """Router for the (currently disabled) retrieval loop.

    Returns "end" once enough results are collected or the last result is
    the FINISHED sentinel, otherwise "continue".
    """
    results = state["web_results"]
    # Guard: with no results yet there is nothing to inspect — the original
    # `results[-1]` raised IndexError on an empty list.
    if not results:
        return "continue"
    if len(results) > 600:
        return "end"
    # Entries are dicts produced by web_retriever_node, so use key access;
    # the original attribute access (`.content`) raised AttributeError.
    if results[-1].get("tech_data") == "FINISHED":
        return "end"
    return "continue"


# Build the two-node pipeline: retrieve references, then render the document.
graph_builder = StateGraph(TechBidState)
graph_builder.add_node("web_retriever", web_retriever_node)
graph_builder.add_node("word_generator", word_generator_node)
# The original wiring sent START straight to word_generator, leaving
# web_retriever orphaned/unreachable and the graph without a terminal edge.
graph_builder.add_edge(START, "web_retriever")
graph_builder.add_edge("web_retriever", "word_generator")
graph_builder.add_edge("word_generator", END)
simulation = graph_builder.compile()

if __name__ == "__main__":
    result = simulation.invoke({
        "user_query": "采用“框架+插件”的模式来实现数据引接，并实现UDP和NTRIP的插件",
        "web_results": [],
        "technical_content": "",
        "word_path": ""
    })
    print(f"生成文档路径：{result['word_path']}")

# for chunk in simulation.stream({"messages": []}):
#     # Print out all events aside from the final end chunk
#     if END not in chunk:
#         print(chunk)
#         print("----")