import os
import time
from typing import Dict, Optional

import chainlit as cl
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objects as go
from chainlit.input_widget import Select, Switch, Slider
from chainlit.types import ThreadDict
from chainlit.utils import mount_chainlit
from fastapi import FastAPI
from fastapi import Request, Response
from mcp import ClientSession
from openai import AsyncOpenAI

# Use your own API key. SECURITY(review): a real-looking secret was hard-coded
# here; it is now read from the DEEPSEEK_API_KEY environment variable. The old
# literal is kept only as a fallback for backward compatibility — rotate that
# key, since it has been exposed in version control.
client = AsyncOpenAI(
    base_url='https://api.deepseek.com',
    api_key=os.getenv("DEEPSEEK_API_KEY", "sk-605e60a1301040759a821b6b677556fb"),
)
@cl.step
async def parent_step():
    """Outer demo step: runs the child step, then reports its own output."""
    await child_step()
    return "Parent step output"

@cl.step
async def child_step():
    """Inner demo step: simply returns its canned output."""
    return "Child step output"
 
# NOTE: The authentication callbacks below are intentionally disabled by
# wrapping them in a module-level string literal (a no-op expression).
# Remove the surrounding quotes to enable header- or password-based auth.
'''
@cl.header_auth_callback
def header_auth_callback(headers: Dict) -> Optional[cl.User]:
  # Verify the signature of a token in the header (ex: jwt token)
  # or check that the value is matching a row from your database
  if headers.get("test-header") == "test-value":
    return cl.User(identifier="admin", metadata={"role": "admin", "provider": "header"})
  else:
    return None
# chainlit create-secret
@cl.password_auth_callback
def auth_callback(username: str, password: str):
    
    # Fetch the user matching username from your database
    # and compare the hashed password with the value stored in the database
    if (username, password) == ("admin", "admin"):
        return cl.User(
            identifier="admin", metadata={"role": "admin", "provider": "credentials"}
        )
    else:
        return None
'''
@cl.cache
def to_cache():
    """Simulate an expensive computation; @cl.cache memoizes the result."""
    time.sleep(20)  # computing data... simulate a time-consuming process
    return "Hello!"

# Runs once at import time (blocks ~20 s); later calls hit the chainlit cache.
value = to_cache()

@cl.set_chat_profiles
async def chat_profile(current_user: cl.User):
    """Register the selectable chat profiles shown on the welcome screen.

    The two demo profiles were duplicated literal-for-literal (icon,
    description, and starters all identical); they are now built from a
    shared helper so the literals exist once. Returned profiles are
    byte-for-byte equivalent to the originals.
    """

    def _default_starters():
        # Build fresh Starter objects per profile so no element instance
        # is shared between profiles.
        return [
            cl.Starter(
                label="Morning routine ideation",
                message="Can you help me create a personalized morning routine that would help increase my productivity throughout the day? Start by asking me about my current habits and what activities energize me in the morning.",
                icon="/public/idea.svg",
            ),
            cl.Starter(
                label="Explain superconductors",
                message="Explain superconductors like I'm five years old.",
                icon="/public/learn.svg",
            ),
        ]

    description = "The underlying LLM model is **GPT-3.5**, a *175B parameter model* trained on 410GB of text data."
    return [
        cl.ChatProfile(
            name=name,
            icon="https://picsum.photos/250",
            markdown_description=description,
            starters=_default_starters(),
        )
        for name in ("My Chat Profile", "My Chat ssss")
    ]

@cl.set_starters
async def set_starters():
    """Return the starter suggestions shown on the welcome screen."""
    specs = [
        ("你好",
         "Can you help me create a personalized morning routine that would help increase my productivity throughout the day? Start by asking me about my current habits and what activities energize me in the morning.",
         "/public/idea.svg", None),
        ("Explain superconductors",
         "Explain superconductors like I'm five years old.",
         "/public/learn.svg", None),
        ("Python script for daily email reports",
         "Write a script to automate sending daily email reports in Python, and walk me through how I would set it up.",
         "/public/terminal.svg", "code"),
        ("Text inviting friend to wedding",
         "Write a text asking a friend to be my plus-one at a wedding next month. I want to keep it super short and casual, and offer an out.",
         "/public/write.svg", None),
    ]
    starters = []
    for label, message, icon, command in specs:
        kwargs = {"label": label, "message": message, "icon": icon}
        if command is not None:
            # Only the scripting starter carries an attached command.
            kwargs["command"] = command
        starters.append(cl.Starter(**kwargs))
    return starters

 
@cl.step(type="tool",name='工具001')
async def tool001():
    """Demo tool step: pause briefly to simulate work, then answer."""
    await cl.sleep(2)  # pretend the tool is doing something
    return "Response 001!"

@cl.step(type="tool")
async def tool002():
    """Second demo tool step: simulated delay followed by a canned reply."""
    await cl.sleep(2)  # pretend the tool is doing something
    return "Response 002 the tool!"

@cl.on_chat_start
async def on_chat_start():
    """
    Chat session started: read the selected chat profile, then send a
    demo message containing an inline paginated Dataframe element.
    """
    # Which profile the user picked on the welcome screen (set by
    # @cl.set_chat_profiles); value is read but currently unused.
    chat_profile = cl.user_session.get("chat_profile")
    # Give the user an action button (disabled demo code below)
    # NOTE: the triple-quoted block below is a no-op string literal used to
    # disable demo code (action button, commands, ChatSettings widgets).
    '''
    actions = [
        cl.Action(
            name="action_button",
            icon="mouse-pointer-click",
            payload={"value": "example_value"},
            label="动作 001Click me!"
        )
    ]

    await cl.Message(content="Interact with this action button:", actions=actions).send()
    
    #配置命令
    commands = [
    {"id": "Picture", "icon": "image", "description": "Use DALL-E"},
    {"id": "Search", "icon": "globe", "description": "Find on the web"},
    {
        "id": "Canvas",
        "icon": "pen-line",
        "description": "Collaborate on writing and code",
    },
    ]
    await cl.context.emitter.set_commands(commands)
    
    #配置界面
    settings = await cl.ChatSettings(
        [
            Select(
                id="Model",
                label="OpenAI - Model",
                values=["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k"],
                initial_index=0,
            ),
            Switch(id="Streaming", label="OpenAI - Stream Tokens", initial=True),
            Slider(
                id="Temperature",
                label="OpenAI - Temperature",
                initial=1,
                min=0,
                max=2,
                step=0.1,
            ),
            Slider(
                id="SAI_Steps",
                label="Stability AI - Steps",
                initial=30,
                min=10,
                max=150,
                step=1,
                description="Amount of inference steps performed on image generation.",
            ),
            Slider(
                id="SAI_Cfg_Scale",
                label="Stability AI - Cfg_Scale",
                initial=7,
                min=1,
                max=35,
                step=0.1,
                description="Influences how strongly your generation is guided to match your prompt.",
            ),
            Slider(
                id="SAI_Width",
                label="Stability AI - Image Width",
                initial=512,
                min=256,
                max=2048,
                step=64,
                tooltip="Measured in pixels",
            ),
            Slider(
                id="SAI_Height",
                label="Stability AI - Image Height",
                initial=512,
                min=256,
                max=2048,
                step=64,
                tooltip="Measured in pixels",
            ),
        ]
    ).send()
    '''
    # Create a sample DataFrame with more than 10 rows to test pagination functionality
    data = {
        "Name": [
            "Alice",
            "David",
            "Charlie",
            "Bob",
            "Eva",
            "Grace",
            "Hannah",
            "Jack",
            "Frank",
            "Kara",
            "Liam",
            "Ivy",
            "Mia",
            "Noah",
            "Olivia",
        ],
        "Age": [25, 40, 35, 30, 45, 55, 60, 70, 50, 75, 80, 65, 85, 90, 95],
        "City": [
            "New York",
            "Houston",
            "Chicago",
            "Los Angeles",
            "Phoenix",
            "San Antonio",
            "San Diego",
            "San Jose",
            "Philadelphia",
            "Austin",
            "Fort Worth",
            "Dallas",
            "Jacksonville",
            "Columbus",
            "Charlotte",
        ],
        "Salary": [
            70000,
            100000,
            90000,
            80000,
            110000,
            130000,
            140000,
            160000,
            120000,
            170000,
            180000,
            150000,
            190000,
            200000,
            210000,
        ],
    }

    df = pd.DataFrame(data)

    # Render the DataFrame inline in the chat as a Dataframe element.
    elements = [cl.Dataframe(data=df, display="inline", name="Dataframe")]

    await cl.Message(content="This message has a Dataframe", elements=elements).send()

    

@cl.on_settings_update
async def on_settings_update(settings):
    """Log updated chat settings and acknowledge them to the user."""
    print("on_settings_update", settings)
    ack = cl.Message(content="收到 on_settings_update")
    await ack.send()
    




@cl.action_callback("action_button")
async def action_callback(action: cl.Action):
    """Handle a click on the demo action button.

    Removes the button from the UI, then acknowledges the click.
    Fixes: the original "docstring" sat after the first statement (a dead
    string expression), and the body used irregular 5-space indentation.
    """
    await action.remove()
    await cl.Message(
        content="收到 click",
    ).send()
    

@cl.on_message
async def on_message(message: cl.Message):
    '''
    Handle an incoming user message: send a fixed reply, run the nested
    demo steps, and display a TaskList whose statuses are updated live.
    '''
    await  cl.Message(
        content="This message has an image!"
    ).send()


    # NOTE: the triple-quoted block below is a no-op string literal used to
    # disable a long series of feature demos (streaming, file upload,
    # AskAction/AskElement prompts, media elements, charts).
    '''
    await cl.Message(
        content=f"Receivessd: {cl.chat_context.to_openai()}",
    ).send()

    tool_res = await tool001()
    await cl.Message(
        content=f"tool_res: {tool_res}",
    ).send()
    tool_res = await tool002()
    await cl.Message(
        content=f"tool_res: {tool_res}",
    ).send()
    await cl.Message(
        content=f"Receivessd: {message.content}",
    ).send()

    image = cl.Image(path="https://www.baidu.com/img/flexible/logo/pc/result.png", name="image1", display="inline")

    await cl.Message(
        content="This message has an image!",
        elements=[image],
    ).send()
 
    elements = [
        cl.Image(path="./result.png", name="image1"),
        
        cl.Text(content="Here is a side text document", name="text1"),
        cl.Text(content="Here is a page text document", name="text2"),
    ]

    # Setting elements will open the sidebar
    await cl.ElementSidebar.set_elements(elements)
   
    actions = [
        cl.Action(
            name="action_button",
            icon="mouse-pointer-click",
            payload={"value": "example_value"},
                    label="动作 002 Click me!"
        )
    ]

    await cl.Message(content="Interact with this action button:", actions=actions).send()
    
    if message.command == "Picture":
     await cl.Message(content="收到命令:").send()

    

    msg = cl.Message(content="....")

    stream = await client.chat.completions.create(
        stream=True, model="deepseek-chat",
        messages=[
          {"role": "system", "content": "You are a helpful assistant."},
          {"role": "user", "content": "请问 1+1 等于几？"}
        ]
    )

    async for part in stream:
        if token := part.choices[0].delta.content or "":
            await msg.stream_token(token)

    await msg.update()
    # 让用户上传文件
    if message.content=='file':
        files = None

            # Wait for the user to upload a file
        while files == None:
            files = await cl.AskFileMessage(
                content="Please upload a text file to begin!", accept=["text/plain"]
            ).send()

        text_file = files[0]

        with open(text_file.path, "r", encoding="utf-8") as f:
            text = f.read()

        # Let the user know that the system is ready
        await cl.Message(
            content=f"`{text_file.name}` uploaded, it contains {len(text)} characters!"
        ).send()
    
   # 中断 让用户选择
    res = await cl.AskActionMessage(
        content="Pick an action!",
        actions=[
            cl.Action(name="continue", payload={"value": "continue"}, label="✅ Continue"),
            cl.Action(name="cancel", payload={"value": "cancel"}, label="❌ Cancel"),
        ],
    ).send()
 
    if res and res.get("payload").get("value") == "continue":
        await cl.Message(
            content="Continue!",
        ).send()
    
    #中断 让用户填写表单
    element = cl.CustomElement(
        name="JiraTicket",
        display="inline",
        props={
            "timeout": 2000,
            "fields": [
                {"id": "summary", "label": "Summary", "type": "text", "required": True},
                {"id": "description", "label": "Description", "type": "textarea"},
                {
                    "id": "due",
                    "label": "Due Date",
                    "type": "date",
                },
                {
                    "id": "priority",
                    "label": "Priority",
                    "type": "select",
                    "options": ["Low", "Medium", "High"],
                    "value": "Medium",
                    "required": True,
                },
            ],
        },
    )
    res = await cl.AskElementMessage(
        content="Create a new Jira ticket:", element=element, timeout=10
    ).send()
    if res and res.get("submitted"):
        await cl.Message(
            content=f"Ticket '{res['summary']}' with priority {res['priority']} submitted"
        ).send()

    
    #异步发送（在处理中）
    answer = await cl.make_async(sync_func)()
    await cl.Message(
        content=answer,
    ).send()
    #接受文件
    await cl.Message(
        content=value,
    ).send()

    images = [file for file in message.elements if "image" in file.mime]
    await cl.Message(content=f"Received {len(images)} image(s)").send()
    
    #消息二次更新
    msg = cl.Message(content="Hello!")
    await msg.send()

    await cl.sleep(2)

    msg.content = "Hello again!"
    await msg.update()
    
    #pdf 文件
    
    elements = [
      cl.Pdf(name="pdf1", display="side", path="./pdf.pdf", page=1)
    ]
    # Reminder: The name of the pdf must be in the content of the message
    await cl.Message(content="Look at this local pdf1!", elements=elements).send()
   

    #mp3 文件
    elements = [
        cl.Audio(name="example.mp3", path="./mp3.mp3", display="inline"),
    ]
    await cl.Message(
        content="Here is an audio file",
        elements=elements,
    ).send()

    #mp4 文件
    elements = [
        cl.Video(name="example.mp4", path="./example.mp4", display="inline"),
    ]
    await cl.Message(
        content="Here is an video file",
        elements=elements,
    ).send()
    
    #生成图表
    fig = go.Figure(
        data=[go.Bar(y=[2, 1, 3])],
        layout_title_text="An example figure",
    )
    elements = [cl.Plotly(name="chart", figure=fig, display="inline")]

    await cl.Message(content="This message has a chart", elements=elements).send()
    # 图表02
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3, 4], [1, 4, 2, 3])

    elements = [
        cl.Pyplot(name="plot", figure=fig, display="inline"),
    ]
    await cl.Message(
        content="Here is a simple plot",
        elements=elements,
    ).send()
    '''
    # Generate output step by step (nested demo steps)
    await parent_step()

    task_list = cl.TaskList()
    task_list.status = "Running..."

    # Create a task and put it in the running state
    task1 = cl.Task(title="Processing data", status=cl.TaskStatus.RUNNING)
    await task_list.add_task(task1)
    # Create another task that is in the ready state
    task2 = cl.Task(title="Performing calculations")
    await task_list.add_task(task2)

    # Optional: link a message to each task to allow task navigation in the chat history
    # NOTE(review): this rebinds the `message` parameter — intentional here,
    # since the incoming message is no longer needed past this point.
    message = await cl.Message(content="Started processing data").send()
    task1.forId = message.id

    # Update the task list in the interface
    await task_list.send()

    # Perform some action on your end
    await cl.sleep(1)

    # Update the task statuses
    task1.status = cl.TaskStatus.DONE
    task2.status = cl.TaskStatus.FAILED
    task_list.status = "Failed"
    await task_list.send()


def sync_func():
    """Blocking helper used to demo cl.make_async: sleep, then greet."""
    time.sleep(15)  # simulate long-running synchronous work
    return "Hello!"

@cl.on_mcp_connect
async def on_mcp_connect(connection, session: ClientSession):
    """Called when an MCP connection is established.

    This handler is required for MCP to work; put connection
    initialization code here. (Merged the original's duplicate
    docstrings — the second string literal was a dead statement.)
    """
    
@cl.on_mcp_disconnect
async def on_mcp_disconnect(name: str, session: ClientSession):
    """Called when an MCP connection is terminated.

    Optional handler; put cleanup code here. (Merged the original's
    duplicate docstrings — the second string literal was a dead statement.)
    """

@cl.on_chat_end
def on_chat_end():
    """Log that the chat session has ended."""
    print("会话结束了")

@cl.on_chat_resume
async def on_chat_resume(thread: ThreadDict):
    """Log when a user resumes an earlier chat thread."""
    note = "The user resumed a previous chat session!"
    print(note)

@cl.on_stop
def on_stop():
    """Log that the user interrupted the running task."""
    for line in ('end', "The user wants to stop the task!"):
        print(line)

@cl.on_logout
def on_logout(request: Request, response: Response):
    """Clear the session cookie when the user logs out."""
    cookie_name = "my_cookie"
    response.delete_cookie(cookie_name)
    print('on_logout')
@cl.on_audio_chunk
async def on_audio_chunk(chunk: cl.InputAudioChunk):
    """Acknowledge each incoming audio chunk with a chat message.

    Fixes: dropped the stray leading "T" typo from the user-facing
    message and removed the redundant trailing `pass`.
    """
    print('收到音频')  # log receipt of the audio chunk
    await cl.Message(
        content="收到音频!"
    ).send()