# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the 【火山方舟】原型应用软件自用许可协议
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     https://www.volcengine.com/docs/82379/1433703
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import html
import json
import logging
from typing import Any, AsyncGenerator, Generator

import gradio as gr
from gradio import ChatMessage
from openai import OpenAI
from pydantic import BaseModel

from arkitect.core.component.llm.model import ArkChatRequest, ArkMessage
from config import (ARK_API_KEY, API_ADDR, API_BOT_ID)
from search_engine.bailian_bot import BailianBotSearchEngine
from search_engine.tavily import TavilySearchEngine
from search_engine.volc_bot import VolcBotSearchEngine

# Reasoning/planning model endpoint; DeepSeek-R1 is recommended.
REASONING_EP_ID = "deepseek-r1-250120"
# Search backend selector; current default is the Bailian bot.
# Supported values (see engine construction below): "volc_bot", "tavily", "bailian".
SEARCH_ENGINE = "bailian"  # "volc_bot"
# Optional: only required when SEARCH_ENGINE == "tavily".
TAVILY_API_KEY = "{YOUR_TAVILY_API_KEY}"
# Optional: bot id used by both the volc_bot and bailian engines.
SEARCH_BOT_ID = "bot-20250522214322-zbhw9"

from deep_search import DeepSearch, ExtraConfig

# OpenAI-compatible client for the Ark gateway.
# NOTE(review): `client` appears unused by the active code path in this file —
# confirm before removing.
client = OpenAI(base_url=API_ADDR, api_key=ARK_API_KEY)
# Default search backend; may be replaced just below based on SEARCH_ENGINE.
search_engine = VolcBotSearchEngine(bot_id=SEARCH_BOT_ID)

if "tavily" == SEARCH_ENGINE:
    search_engine = TavilySearchEngine(api_key=TAVILY_API_KEY)
elif "bailian" == SEARCH_ENGINE:
    search_engine = BailianBotSearchEngine(bot_id=SEARCH_BOT_ID)

# Deep-research orchestrator: planning and summarization share the same
# reasoning endpoint. ExtraConfig caps: max_planning_rounds=5,
# max_search_words=5 (exact semantics defined in the deep_search module).
deep_research = DeepSearch(
    search_engine=search_engine,
    planning_endpoint_id=REASONING_EP_ID,
    summary_endpoint_id=REASONING_EP_ID,
    extra_config=ExtraConfig(
        max_planning_rounds=5,
        max_search_words=5,
    )
)

logging.basicConfig(
    level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s"
)
# Module-level logger.
# NOTE(review): the streaming loop below logs via the root `logging` module
# rather than this logger — presumably LOGGER was intended.
LOGGER = logging.getLogger(__name__)
def clean_json_string(json_str):
    """Strip control characters that are illegal inside JSON strings.

    Keeps tab (0x09), newline (0x0A) and carriage return (0x0D); every
    other character with a code point below 0x20 is dropped. Characters at
    or above 0x20 pass through unchanged.
    """
    keep = {9, 10, 13}
    cleaned = []
    for ch in json_str:
        code = ord(ch)
        if code >= 32 or code in keep:
            cleaned.append(ch)
    return "".join(cleaned)
# Per-question search records rendered into the side panel; reset by
# stream_chat at the start of every new question.
search_records = []

# Page-level CSS: fixed-height scrollable search panel + full-height chat column.
global_css = """
.search-panel {
  height: 800px;
  overflow: auto;
  border: 1px solid #ccc;
  padding: 10px;
}

.chat-col {
  height: 100%;
}
"""


def update_search_panel():
    """Render the right-hand search panel from the global ``search_records``.

    Each record becomes one section showing the search query and an ordered
    list of result links. Pydantic models are converted to plain dicts first.

    Returns:
        gr.HTML: component wrapping the rendered markup.
    """
    parts = ["<div class='search-panel'>"]
    for record in search_records:
        if isinstance(record, BaseModel):
            record = record.model_dump(exclude_none=True, exclude_unset=True)
        references = record.get('search_references', []) or []
        # Escape and quote all interpolated values: unescaped titles/queries
        # and an unquoted href previously broke the markup (and allowed HTML
        # injection) for values containing quotes, spaces or '<'.
        links = "".join(
            '<li><a href="{0}">{1}</a></li>'.format(
                html.escape(str(res.get('url', '')), quote=True),
                html.escape(str(res.get('title', ''))),
            )
            for res in references
        )
        parts.append(f"""
        <div class='search-result-container'>
            <strong>🔍search [{html.escape(str(record.get('query', '')))}]</strong>
            <div class='results'>
                <strong>📖results: ({len(references)}):</strong>
                <ol>{links}</ol>
            </div>
        </div>
        """)
    parts.append("</div>")
    return gr.HTML(value="".join(parts))


async def stream_chat(message: str,
                history: list,
                ) -> AsyncGenerator[tuple[list, Any], None]:
    """Stream one deep-research writing turn to the Gradio ChatInterface.

    The raw user input is wrapped in a LEGO-columnist prompt template, run
    through ``deep_research.astream_deep_research``, and progressively
    yielded back as ``(history, search_panel)`` pairs: a "thinking" bubble
    per planning round, a "searching" bubble per search round, and finally
    the accumulated answer text as a trailing assistant message.

    Args:
        message: raw user input; embedded verbatim into the prompt below.
        history: Gradio chat history; cleared and rebuilt on every call.

    Yields:
        Tuples of ``(chat message list, gr.HTML search panel)`` — the second
        element feeds the ``references`` component via ``additional_outputs``.
    """
    global search_records

    # Prompt template (user-facing Chinese, kept verbatim): extract topics
    # from the news snippet, search for supporting material, then write an
    # article following the embedded format example.
    message = f"""你是一个资深积木乐高专栏作家，写作流程：
1.基于新闻，请找出2个和新闻相关的主题（不要关心品牌，年份等细节，主题应该是简单的5个字以内）。例如：新闻"三叉戟的八格车来袭！玛莎拉蒂授权积木实测：咬合力拉满，鲨鱼腮却失踪了？【测评】" ，那么主题就是“八格车”“莎拉蒂”。
2.然后基于关键词进行至少一次搜索资料补充：搜索相关新闻主题进行资料补充（例如，怎样拼装[主题],查找[主题]等等）， 整理资料不要复杂（简单补充则可，资料相关就可以，搜集资料能够写出文章就可以，同时每一个主题至少搜集一次补充资料）
3.最后：按以上整理的所有内容写一个吸引人的文章【描写热点主题相关能够让观众感到兴趣和愉快】，文章内容包括今天热点主题介绍+补充的相关知识资料，文章主题结构：【主题1+补充1，主题2+补充2，...】
4.注意：务必要保留搜索到的相关图片，分析主题不要过多关注（品牌，定价，用户评价，具体细节）。

## 格式示例:
```
# 今日乐高MOC热点主题速递：XXX
## 主题1：双头鲨鱼
- 具体介绍主题1：双头鲨鱼的情况。
  尽可能详细介绍主题1，包括： 主题1的特点，主题1的应用场景， 主题1的相关图片等等。
  列出图片（ 如"image_arr_new"或"cover_url"图片)，图片格式能够展示给用户看到 
- 主题1相关补充资料：
  补充资料的图片展示：图片1,图片2...相关图片都展示给用户看到 ， 

主题2：XXXX
......
文章结语
XXX
```
最后：给你提供今天小红书的积木热点新闻材料：###  

  {message}
    

 ###，"""

    # Each call starts a fresh conversation and a fresh search record list.
    history.clear()
    search_records = []

    # Accumulators for the streamed deltas of the current round.
    sum_reasoning_content = ""
    sum_content = ""
    sum_search_content = ""
    planning_rounds = 0

    # Round-0 bubbles; their contents are mutated in place and Gradio
    # re-renders them on every yield.
    thinking_msg = ChatMessage(content=sum_reasoning_content,
                               metadata={"title": f"🤔 thinking round {planning_rounds}",
                                         "id": f"thinking-round-{planning_rounds}", "status": "pending"})

    searching_msg = ChatMessage(content="",
                                metadata={"title": f"🔍 searching round {planning_rounds}",
                                          "id": f"searching-round-{planning_rounds}", "status": "pending"})
    history.append(thinking_msg)

    request = ArkChatRequest(
        model=API_BOT_ID,
        messages=[
            ArkMessage(
                role="user",
                content=message
            )
        ]
    )
    async for rsp in deep_research.astream_deep_research(
        request=request,
        question=message,
    ):
        # Per-chunk deltas; both fields are optional on the delta object.
        reasoning_content = rsp.choices[0].delta.reasoning_content if hasattr(rsp.choices[0].delta,
                                                                              'reasoning_content') else ''
        content = rsp.choices[0].delta.content if hasattr(rsp.choices[0].delta, 'content') else ''
        metadata = getattr(rsp, 'metadata', {})

        if metadata:
            # NOTE(review): logs via the root logger; the module-level
            # LOGGER was presumably intended.
            logging.info(f"metadata: {rsp.metadata}")
            search_state = metadata.get('search_state', '')
            search_keywords = metadata.get('search_keywords', [])
            search_results = metadata.get('search_results', [])

            if search_state == 'searching':
                # Thinking round ended: mark its bubble done.
                thinking_msg.metadata.update({"status": "done"})
                yield history, update_search_panel()
                # clear thinking content
                sum_reasoning_content = ""

                # Search round started: show the chosen keywords.
                sum_search_content += "\n【搜索关键词】\n" + "\n\n".join(search_keywords) + "\n"
                searching_msg.content = sum_search_content
                history.append(searching_msg)
                yield history, update_search_panel()
            elif search_state == 'searched':
                # Search round finished: append per-result summaries.
                sum_search_content += "\n【搜索总结】\n\n"
                for search_result in search_results:
                    sum_search_content += f"\n {search_result.summary_content} \n"
                searching_msg.content = sum_search_content
                searching_msg.metadata.update({"status": "done"})
                # Extend the global record list that feeds the side panel.
                search_records += search_results
                yield history, update_search_panel()
                sum_search_content = ""
                sum_reasoning_content = ""
                planning_rounds += 1
                # Fresh bubbles for the next planning/search round.
                thinking_msg = ChatMessage(content=sum_reasoning_content,
                                           metadata={"title": f"🤔 thinking round {planning_rounds}",
                                                     "id": f"thinking-round-{planning_rounds}", "status": "pending"})
                searching_msg = ChatMessage(content="",
                                            metadata={"title": f"🔍 searching round {planning_rounds}",
                                                      "id": f"searching-round-{planning_rounds}", "status": "pending"})
                history.append(thinking_msg)
                yield history, update_search_panel()

        if reasoning_content:
            # Stream reasoning tokens into the current thinking bubble.
            sum_reasoning_content += reasoning_content
            thinking_msg.content = sum_reasoning_content
            thinking_msg.metadata.update({"status": "pending"})
            yield history, update_search_panel()
        elif content:
            # Final answer tokens: close the thinking bubble and stream the
            # growing assistant message (appended to a copy, not stored in
            # `history` itself).
            thinking_msg.metadata.update({"status": "done"})
            sum_content += content
            yield [*history, ChatMessage(
                content=sum_content,
                role="assistant",
            )], update_search_panel()


if __name__ == "__main__":
    with gr.Blocks(css=global_css) as demo:
        # Search-panel component; rendered later in the right-hand column
        # and updated via stream_chat's additional output.
        references = gr.HTML(render=False)
        with gr.Row(min_height=900, height=900):
            with gr.Column(scale=2, elem_classes='chat-col'):
                gr.Markdown("<center><h2>🤖 积木乐高写作 Deep Research</h2></center>")
                gr.Markdown("> **1,直接在对话框提供你的积木相关信息，AI就会帮忙写文章。2，快捷按钮，点击以下ABC按钮，AI给你选题，试试。")
                
                # Three quick-pick topic buttons.
                with gr.Row():
                    btn_a = gr.Button("A")
                    btn_b = gr.Button("B")
                    btn_c = gr.Button("C")
                
                # Button click handler: returns one of three "hot topic"
                # entries from a hard-coded JSON fixture.
                def handle_button_click(btn_id):
                    # NOTE(review): `requests` is imported but never used —
                    # `response` below is a hard-coded fixture, so the
                    # RequestException handler is currently unreachable.
                    import requests
                    try:
                        response =  """
{
  "count": 30,
  "result": [ {
      "_id": "6834b41a167bb9d9e1b12a5a",
      "note_id": "68312154000000002101b45f",
      "title": "用25块底板搭建超大型乐高动物园",
      "user_nickname": "有趣砖块",
      "user_avatar": "https://sns-avatar-qc.xhscdn.com/avatar/1040g2jo30tgb84qaks005navkvbg85pbisl7ek8?imageView2/2/w/80/format/jpg",
      "user_id": "5d5fa7d7000000000100172b",
      "liked_count": "3216",
      "collected_count": "2956",
      "comment_count": "28",
      "shared_count": "109",
      "cover_url": "https://legoallbricks.oss-cn-shenzhen.aliyuncs.com/2018_12_04_08_02_46_97672400_b8a9141f755f62e1c0ce61244dcd9e7a.jpg",
      "publish_time": 1748284440, 
      "synced": false
    },
    {
      "_id": "6834b41a167bb9d9e1b12a55",
      "note_id": "6830456f000000000f03b145",
      "title": "乐高积木载人机甲 飞翼式动力背包设计预告",
      "user_nickname": "灰白jedi",
      "user_avatar": "https://sns-avatar-qc.xhscdn.com/avatar/641da09da41e0aa3a670021c.jpg?imageView2/2/w/80/format/jpg",
      "user_id": "5e933a8f00000000010071cd",
      "liked_count": "255",
      "collected_count": "146",
      "comment_count": "44",
      "shared_count": "1",
      "cover_url": "https://legoallbricks.oss-cn-shenzhen.aliyuncs.com/2018_12_04_08_02_46_97672400_b8a9141f755f62e1c0ce61244dcd9e7a.jpg",
      "publish_time": 1748284434, 
      "synced": false
    },
    {
      "_id": "6834b41a167bb9d9e1b12a53",
      "note_id": "6831d0b80000000022006a89",
      "title": "把“幻影忍者”们送上太空拯救银河系",
      "user_nickname": "有趣砖块",
      "user_avatar": "https://sns-avatar-qc.xhscdn.com/avatar/1040g2jo30tgb84qaks005navkvbg85pbisl7ek8?imageView2/2/w/80/format/jpg",
      "user_id": "5d5fa7d7000000000100172b",
      "liked_count": "1120",
      "collected_count": "928",
      "comment_count": "32",
      "shared_count": "17",
      "cover_url": "https://legoallbricks.oss-cn-shenzhen.aliyuncs.com/2018_12_04_08_02_46_97672400_b8a9141f755f62e1c0ce61244dcd9e7a.jpg",
      "publish_time": 1748284433, 
      "synced": false
    }  ]
}
"""
                        response = clean_json_string(response)
                        try:
                            data = json.loads(response)
                        except json.JSONDecodeError as e:
                            print(f"错误位置: {e.pos}")
                            print(f"错误附近的字s符: {response[e.pos-20:e.pos+20]}")
                        # NOTE(review): the fixture is parsed a second time;
                        # if the first parse failed, this re-raises and is
                        # caught by the ValueError handler below.
                        data = json.loads(response) 
                        print(data)
                        if not data or "result" not in data or len(data["result"]) < 3:
                            return "API返回数据格式不正确或数据不足"
                        
                        # Map button id -> result index (A=0, B=1, else 2).
                        # NOTE(review): `btn_text` is assigned but never used.
                        if btn_id == "A":
                            selected = data["result"][0]
                            btn_text = selected['title']
                        elif btn_id == "B":
                            selected = data["result"][1]
                            btn_text = selected['title']
                        else:
                            selected = data["result"][2]
                            btn_text = selected['title']
                        
                        # NOTE(review): this dict is wired to the chat
                        # textbox below, so Gradio will display its repr —
                        # returning just the title string may be intended.
                        return {
                            'title': selected['title'],
                            'content': selected
                        }
                    except requests.exceptions.RequestException as e:
                        return f"请求API出错: {str(e)}"
                    except ValueError as e:
                        return f"解析JSON出错: {str(e)}"
                
                # Hover handler: same lookup, title only.
                # NOTE(review): defined but never bound to any event below.
                def handle_button_hover(btn_id):
                    result = handle_button_click(btn_id)
                    if isinstance(result, dict) and 'title' in result:
                        return result['title']
                    return result
                
                # Chat UI; `references` receives the search-panel HTML as
                # stream_chat's additional output.
                chat_interface = gr.ChatInterface(
                    fn=stream_chat,
                    additional_outputs=[references],
                    type="messages",
                    fill_height=True,
                )
                
                # Clicking a button drops the selected topic into the chat
                # input textbox.
                btn_a.click(fn=lambda: handle_button_click("A"), outputs=chat_interface.textbox)
                btn_b.click(fn=lambda: handle_button_click("B"), outputs=chat_interface.textbox)
                btn_c.click(fn=lambda: handle_button_click("C"), outputs=chat_interface.textbox)
                
            with gr.Column(scale=1):
                gr.Markdown("<center><h2>📔 Search Records</h2></center>")
                gr.Markdown("> **searched content will by displayed here**")
                references.render()

    demo.launch()
