import gradio as gr
import openai
import json
import os
import shutil
import re
import io
import contextlib
import traceback
from pathlib import Path
from copy import deepcopy
import requests
import threading
import http.server
import socketserver

# API endpoint and model path
API_BASE = "http://localhost:8000/v1"  # local vLLM OpenAI-compatible endpoint
MODEL_PATH = "/fs/fast/u2023000922/zhangshaolei/checkpoints/0827.new3.100/"

# Initialize OpenAI client
client = openai.OpenAI(
    base_url=API_BASE,
    api_key="dummy"  # VLLM typically doesn't require an API key, but set a dummy one
)

# Workspace directory
WORKSPACE_DIR = "workspace"  # relative to the app's cwd; also reachable via the HTTP server below
HTTP_SERVER_PORT = 8100  # HTTP server port
HTTP_SERVER_BASE = f"http://10.77.110.215:{HTTP_SERVER_PORT}"  # Your server IP and port

def start_http_server():
    """Serve the process's current working directory over HTTP (blocking).

    Runs forever on HTTP_SERVER_PORT; intended to be launched in a daemon
    thread so workspace files get direct download/preview URLs.
    """
    handler = http.server.SimpleHTTPRequestHandler

    class ReusableTCPServer(socketserver.TCPServer):
        # Avoid "Address already in use" when the app is restarted quickly.
        allow_reuse_address = True

    with ReusableTCPServer(("", HTTP_SERVER_PORT), handler) as httpd:
        # Log the actual configured port instead of a hard-coded 8100.
        print(f"HTTP Server serving at port {HTTP_SERVER_PORT}")
        httpd.serve_forever()

# Start HTTP server in a separate thread (daemon=True so it dies with the main process)
threading.Thread(target=start_http_server, daemon=True).start()

# Function to collect file info
def collect_file_info(directory: str) -> str:
    """Return a human-readable listing (name + size) of the files in *directory*.

    One "File N:" entry per regular file, sorted by path, each followed by a
    pretty-printed JSON object with the file's name and size in KB.
    """
    dir_path = Path(directory)
    regular_files = sorted(p for p in dir_path.iterdir() if p.is_file())
    sections = []
    for idx, entry in enumerate(regular_files, start=1):
        kb = os.path.getsize(entry) / 1024
        details = {
            "name": entry.name,
            "size": f"{kb:.1f}KB",
        }
        rendered = json.dumps(details, indent=4, ensure_ascii=False)
        sections.append(f"File {idx}:\n{rendered}\n\n")
    return "".join(sections)

# Function to add files to workspace
def add_files(uploaded_files, workspace):
    """Copy each uploaded file into WORKSPACE_DIR and track its path.

    Accepts a single gradio file object or a list of them (each exposing a
    ``.name`` path); mutates and returns *workspace* so gradio can feed it
    back into state. Duplicate paths are not re-added.
    """
    os.makedirs(WORKSPACE_DIR, exist_ok=True)
    if not uploaded_files:
        return workspace
    batch = uploaded_files if isinstance(uploaded_files, list) else [uploaded_files]
    for item in batch:
        target = os.path.join(WORKSPACE_DIR, os.path.basename(item.name))
        shutil.copy(item.name, target)
        if target not in workspace:
            workspace.append(target)
    return workspace

# Function to clear workspace
def clear_workspace(workspace):
    """Delete every tracked workspace file from disk and return an empty state list."""
    for entry in workspace:
        target = Path(entry)
        if target.exists():
            target.unlink()
    return []

def get_file_icon(extension):
    """Map a file extension (with leading dot, any case) to a display emoji."""
    icon_table = {
        '.jpg': '🖼️', '.jpeg': '🖼️', '.png': '🖼️', '.gif': '🖼️', '.bmp': '🖼️',  # images
        '.pdf': '📕',                                                               # PDF
        '.doc': '📘', '.docx': '📘',                                                # Word
        '.txt': '📄',                                                               # plain text
        '.md': '📝',                                                                # Markdown
        '.csv': '📊', '.xlsx': '📊',                                                # spreadsheets
        '.json': '🗄️', '.sqlite': '🗄️',                                            # data / database
        '.mp4': '🎥', '.avi': '🎥', '.mov': '🎥',                                   # video
        '.mp3': '🎵', '.wav': '🎵',                                                 # audio
        '.zip': '🗜️', '.rar': '🗜️', '.tar': '🗜️',                                  # archives
    }
    # Fall back to a generic folder icon for unknown extensions.
    return icon_table.get(extension.lower(), '📁')

def render_workspace(workspace):
    """Render the workspace file list as a scrollable HTML panel.

    Each file gets an icon, a download link and (for common formats) a
    preview link, all pointing at the standalone HTTP file server
    (HTTP_SERVER_BASE).
    """
    from urllib.parse import quote  # stdlib; local import keeps the block self-contained

    if not workspace:
        return '<div style="border: 1px solid #ccc; padding: 10px; border-radius: 5px;"><p>No files in workspace.</p></div>'

    previewable = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.pdf', '.txt', '.doc', '.docx', '.csv', '.xlsx'}
    file_list = "<ul style='list-style: none; padding: 0; margin: 0;'>"
    for idx, file in enumerate(workspace):
        basename = os.path.basename(file)
        extension = Path(basename).suffix.lower()
        icon = get_file_icon(extension)
        bg_color = "#f5f5f5" if idx % 2 == 0 else "#e8e8e8"
        # URL-encode the name so files with spaces/non-ASCII characters still resolve.
        file_url = f"{HTTP_SERVER_BASE}/workspace/{quote(basename)}"
        download_link = f'<a href="{file_url}" download="{basename}" style="text-decoration: none; color: #007bff;">Download</a>'
        preview_link = ''
        if extension in previewable:
            preview_link = f'<a href="{file_url}" target="_blank" style="text-decoration: none; color: #28a745; margin-left: 10px;">Preview</a>'
        file_list += f'<li style="padding: 8px; background-color: {bg_color};">{icon} {basename} - {download_link}{preview_link}</li>'
    file_list += "</ul>"

    # The inline script auto-scrolls the list so newly created files stay visible.
    html = f'''
    <div style="border: 1px solid #ccc; padding: 10px; border-radius: 5px;">
        <h3 style="margin-top: 0;">Workspace Files:</h3>
        <div id="workspace-scroll" style="max-height: 500px; overflow-y: auto;">
            {file_list}
        </div>
    </div>
    <script>
        var el = document.getElementById("workspace-scroll");
        if (el) {{
            el.scrollTop = el.scrollHeight;
        }}
    </script>
    '''
    return html


def render_action_list(history):
    """Render the DeepAnalyze action pipeline (status box + tag chain) as HTML.

    Parses the latest assistant message for
    <Analyze|Understand|Code|Execute|Answer> tags — including a trailing,
    not-yet-closed tag while the response is still streaming — and draws them
    as a vertical, arrow-connected pipeline with a status header on top.
    """
    # Nothing streamed yet (or the last entry has no assistant text).
    if not history or not history[-1][-1]:
        return '<div style="border: 1px solid #ccc; padding: 10px; border-radius: 5px;"><p>No actions to display.</p></div>'

    text = history[-1][-1][0] if isinstance(history[-1][-1], tuple) else history[-1][-1]
    tag_colors = {
        "Analyze": "red",
        "Understand": "blue",
        "Code": "green",
        "Execute": "brown",
        "Answer": "purple",
    }

    # Extract all actions; the (?=$) alternative also captures a tag that has
    # been opened but not closed yet (mid-stream).
    pattern = r"<(Analyze|Understand|Code|Execute|Answer)>(.*?)(?:</\1>|(?=$))"
    action_contents = [action for action, _ in re.findall(pattern, text, re.DOTALL)]
    last_action = action_contents[-1] if action_contents else None

    # A closed </Answer> marks the end of the whole agent turn.
    if "</Answer>" in text:
        last_action = "Finished"

    # Status box (outside the scrollable area).
    status_text = f"DeepAnalyze Action {len(action_contents)}: {last_action if last_action else 'None'}..."
    html = f'<div style="border: 1px solid #ccc; padding: 10px; border-radius: 5px;">'
    html += f'<h3 style="margin-top: 0; margin-bottom: 10px;">{status_text}</h3>'

    # Scrollable pipeline container.
    html += '<div id="action-scroll" style="max-height: 700px; overflow-y: auto; display: flex; flex-direction: column; align-items: center;">'

    for idx, action in enumerate(action_contents):
        color = tag_colors.get(action, "#e0e0e0")
        # Rectangle with the action name.
        html += f'''
        <div style="margin: 5px 0; width: 200px; text-align: center;">
            <div style="background-color: {color}; padding: 5px; border-radius: 5px; border: 1px solid #999; color: white; font-weight: bold;">
                {action}
            </div>
        </div>
        '''
        # Arrow between consecutive actions.
        if idx < len(action_contents) - 1:
            html += '<div style="font-size: 20px; color: #666;">↓</div>'

    html += '</div>'
    # Keep the newest pipeline step in view.
    html += '''
    <script>
    var el = document.getElementById("action-scroll");
    if (el) {
        el.scrollTop = el.scrollHeight;
    }
    </script>
    '''
    html += '</div>'
    return html

# Function to execute code
def execute_code(code_str):
    """Run *code_str* via exec(), capturing stdout/stderr.

    On success, returns the combined captured output. On failure, returns a
    compact "[Error]: ..." string containing a minimal traceback that points
    at the failing line of the submitted code.
    """
    captured_out = io.StringIO()
    captured_err = io.StringIO()
    try:
        with contextlib.redirect_stdout(captured_out), contextlib.redirect_stderr(captured_err):
            # Fresh globals per call so runs do not leak state into each other.
            exec(code_str, {})
    except Exception as exc:
        source_lines = code_str.splitlines()
        # Locate the line number of the failing statement inside the exec'd
        # string by scanning the formatted traceback.
        failing_line = None
        for tb_line in traceback.format_exc().splitlines():
            if 'File "<string>", line' not in tb_line:
                continue
            try:
                failing_line = int(tb_line.split(", line ")[1].split(",")[0])
            except (IndexError, ValueError):
                continue
            break
        parts = ["Traceback (most recent call last):\n"]
        if failing_line is not None and 1 <= failing_line <= len(source_lines):
            parts.append(f'  File "<string>", line {failing_line}, in <module>\n')
            parts.append(f'    {source_lines[failing_line - 1].strip()}\n')
        parts.append(f"{type(exc).__name__}: {exc}")
        if captured_err.getvalue():
            parts.append(f"\n{captured_err.getvalue()}")
        return f"[Error]:\n{''.join(parts).strip()}"
    result = captured_out.getvalue()
    if captured_err.getvalue():
        result += captured_err.getvalue()
    return result

# Function to handle user message
def user(user_message, history):
    """Append the submitted message to chat history and clear the textbox.

    Returns ("", history): the empty string resets the gradio Textbox, and
    the (possibly mutated) history feeds the Chatbot. Empty submissions are
    ignored.
    """
    if not user_message:
        return "", history
    history.append((user_message, None))  # user turn; assistant slot filled later
    return "", history

def fix_code_block(content):
    """Close any unterminated ```python fences so partial streams render as code.

    While the model response is streaming, a ```python fence may not have its
    closing ``` yet; this inserts the missing closers so markdown rendering
    stays sane. Accepts a plain string or a (text, meta) tuple; anything else
    is returned untouched.
    """
    def balance(text):
        open_fences = 0
        pieces = []
        for raw_line in text.splitlines(keepends=True):
            marker = raw_line.strip()
            if marker.startswith("```python"):
                # A new python fence while one is open: close the previous one first.
                if open_fences:
                    pieces.append("```\n")
                    open_fences -= 1
                open_fences += 1
                pieces.append(raw_line)
            elif marker == "```":
                if open_fences:
                    open_fences -= 1
                pieces.append(raw_line)
            else:
                pieces.append(raw_line)
        # Close anything still dangling at the end of the text.
        pieces.extend("```\n" for _ in range(open_fences))
        return "".join(pieces)

    if isinstance(content, str):
        return balance(content)
    if isinstance(content, tuple):
        return (balance(content[0] or ""), content[1])
    return content

def make_color(messages):
    """Return a copy of chat history with DeepAnalyze tags wrapped in colored HTML.

    Each <Tag>/</Tag> pair (Analyze, Understand, Code, Execute, Answer) is
    escaped and wrapped in a bold, colored span so the Chatbot renders it
    visibly instead of swallowing it as markup.

    Handles both list entries ([user, assistant]) and tuple entries; tuples
    are rebuilt instead of mutated (the previous version raised TypeError on
    a tuple whose second element was a string). The caller's history is never
    mutated (deepcopy first).
    """
    tag_colors = {
        "Analyze": "red",
        "Understand": "blue",
        "Code": "green",
        "Execute": "brown",
        "Answer": "purple",
    }

    def colorize(text):
        # Escape the angle brackets and color the tag for HTML rendering.
        for tag, color in tag_colors.items():
            text = text.replace(
                f"<{tag}>",
                f'<b><span style="color:{color}">&lt;{tag}&gt;</span></b>'
            )
            text = text.replace(
                f"</{tag}>",
                f'<b><span style="color:{color}">&lt;/{tag}&gt;</span></b>'
            )
        return text

    color_messages = []
    for item in deepcopy(messages):
        last = item[-1]
        if last is None:
            color_messages.append(item)
            continue
        new_last = colorize(last) if isinstance(last, str) else (colorize(last[0]), last[1])
        if isinstance(item, tuple):
            item = item[:-1] + (new_last,)  # tuples are immutable; rebuild
        else:
            item[-1] = new_last
        color_messages.append(item)
    return color_messages

# Modified bot function to yield action list HTML
def bot(history, workspace):
    original_cwd = os.getcwd()
    os.makedirs(WORKSPACE_DIR, exist_ok=True)
    messages = []
    for msg in history:
        if isinstance(msg[0], str) and msg[1] is None:  # User message
            messages.append({"role": "user", "content": msg[0]})
        else:  # Assistant message
            content = msg[1][0] if isinstance(msg[1], tuple) and len(msg[1]) > 0 else (msg[1] if isinstance(msg[1], str) else "")
            messages.append({"role": "assistant", "content": content})
    if messages and messages[-1]["role"] == "user":
        user_message = messages[-1]["content"]
        file_info = collect_file_info(WORKSPACE_DIR) if workspace else ""
        messages[-1]["content"] = f"# Instruction\n{user_message}\n\n# Data\n{file_info}\nUse <Analyze>, <Understand>, <Code>, <Execute>, <Answer> tags."
    initial_workspace = set(workspace)
    history.append([None, ""])
    finished = False
    exe_output = None
    while not finished:
        response = client.chat.completions.create(
            model=MODEL_PATH,
            messages=messages,
            temperature=0.0,
            stream=True,
            extra_body={
                "add_generation_prompt": False,
                "stop_token_ids": [151676, 151645],
                "max_new_tokens": 32768,
            }
        )
        cur_res = ""
        for chunk in response:
            if chunk.choices and chunk.choices[0].delta.content is not None:
                delta = chunk.choices[0].delta.content
                cur_res += delta
                history[-1][-1] = history[-1][1] + delta
                _history = deepcopy(history)
                _history[-1] = [None, fix_code_block(history[-1][1])]
                yield make_color(_history), workspace, render_workspace(workspace), render_action_list(history)
            if "</Answer>" in cur_res:
                finished = True
                break
        if chunk.choices[0].finish_reason == "stop" and not finished:
            cur_res += "</Code>"
            messages.append({"role": "assistant", "content": cur_res})
            history[-1][-1] += "</Code>"
            _history = deepcopy(history)
            _history[-1] = [None, fix_code_block(history[-1][1])]
            yield make_color(_history), workspace, render_workspace(workspace), render_action_list(history)
        # messages.append({"role": "assistant", "content": cur_res})
        if "</Code>" in cur_res and not finished:
            
            code_match = re.search(r"<Code>(.*?)</Code>", cur_res, re.DOTALL)
            if code_match:
                code_content = code_match.group(1).strip()
                md_match = re.search(r"```(?:python)?(.*?)```", code_content, re.DOTALL)
                code_str = md_match.group(1).strip() if md_match else code_content
                # import pdb;pdb.set_trace()
                os.chdir(WORKSPACE_DIR)
                exe_output = execute_code(code_str)
                messages.append({"role": "execute", "content": f"{exe_output}"})
                history[-1][-1] += f"\n<Execute>\n```\n{exe_output}\n```\n</Execute>\n"
                os.chdir(original_cwd)
                current_files = set([os.path.join(WORKSPACE_DIR, f) for f in os.listdir(WORKSPACE_DIR) if os.path.isfile(os.path.join(WORKSPACE_DIR, f))])
                new_files = list(current_files - initial_workspace)
                if new_files:
                    workspace.extend(new_files)
                    initial_workspace.update(new_files)
                yield make_color(history), workspace, render_workspace(workspace), render_action_list(history)
                os.chdir(original_cwd)
    os.chdir(original_cwd)
    yield make_color(history), workspace, render_workspace(workspace), render_action_list(history)

with gr.Blocks() as demo:
    # Start from a clean on-disk workspace on every app launch.
    if os.path.exists(WORKSPACE_DIR):
        shutil.rmtree(WORKSPACE_DIR)
    workspace_state = gr.State([])
    
    # # Define action_list at the top to ensure it's available
    # action_list = gr.HTML()
    
    with gr.Row():
        # Left column: File upload and workspace
        with gr.Column(scale=6):
            file_upload = gr.File(label="Upload Files", file_count="multiple", height=150)
            with gr.Row():
                # NOTE(review): gr.Button's scale is documented as an int; 0.4 may be coerced — confirm.
                upload_btn = gr.Button("上传", scale=0.4)
                gr.HTML("<div style='width: 2px;'></div>")
                clear_btn = gr.Button("清空", scale=0.4)
            workspace_display = gr.HTML()
            # Upload: copy files into the workspace, then re-render the file panel.
            upload_btn.click(add_files, inputs=[file_upload, workspace_state], outputs=workspace_state).then(
                render_workspace, inputs=workspace_state, outputs=workspace_display
            )
            # Clear: delete tracked files, then re-render the (now empty) panel.
            clear_btn.click(clear_workspace, inputs=workspace_state, outputs=workspace_state).then(
                render_workspace, inputs=workspace_state, outputs=workspace_display
            )
        # Narrow column between files and chat: the live action pipeline.
        with gr.Column(scale=2):
            action_list = gr.HTML()
        # Middle column: Chatbot
        with gr.Column(scale=11):
            chatbot = gr.Chatbot(height=650)
            with gr.Row():
                with gr.Group():
                    msg = gr.Textbox(placeholder="Enter your message...", show_label=False, scale=2, container=False)
                    with gr.Row():
                        send_btn = gr.Button("Send", scale=1)
                        clear_chat_btn = gr.Button("Clear", scale=1)
            # Send: record the user turn immediately (queue=False), then stream the bot turn.
            send_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
                bot, [chatbot, workspace_state], [chatbot, workspace_state, workspace_display, action_list]
            )
            clear_chat_btn.click(lambda: [], outputs=chatbot)
        # Right column: Action list
        # with gr.Column(scale=1):
        #     # action_list is already defined, just reference it
        #     action_list
            
    # Initial render of both HTML panels when the page first loads.
    demo.load(
        fn=lambda ws: (render_workspace(ws), render_action_list([])),
        inputs=workspace_state,
        outputs=[workspace_display, action_list]
    )

demo.launch(server_name="0.0.0.0")

# import gradio as gr
# import openai
# import json
# import os
# import shutil
# import re
# import io
# import contextlib
# import traceback
# from pathlib import Path
# from copy import deepcopy
# import requests
# import threading
# import http.server
# import socketserver

# # API endpoint and model path
# API_BASE = "http://localhost:8000/v1"
# MODEL_PATH = "/fs/fast/u2023000922/zhangshaolei/checkpoints/0827.new3.100/"

# # Initialize OpenAI client
# client = openai.OpenAI(
#     base_url=API_BASE,
#     api_key="dummy"  # VLLM typically doesn't require an API key, but set a dummy one
# )

# # Workspace directory
# WORKSPACE_DIR = "workspace"
# HTTP_SERVER_PORT = 8100  # HTTP server port
# HTTP_SERVER_BASE = f"http://10.77.110.215:{HTTP_SERVER_PORT}"  # Your server IP and port

# def start_http_server():
#     # os.chdir("workspace")  # HTTP Server root is workspace
#     handler = http.server.SimpleHTTPRequestHandler
#     with socketserver.TCPServer(("", 8100), handler) as httpd:
#         print("HTTP Server serving at port 8100")
#         httpd.serve_forever()

# # Start HTTP server in a separate thread
# threading.Thread(target=start_http_server, daemon=True).start()

# # Function to collect file info
# def collect_file_info(directory: str) -> str:
#     all_file_info_str = ""
#     dir_path = Path(directory)

#     files = sorted([f for f in dir_path.iterdir() if f.is_file()])

#     for idx, file_path in enumerate(files, start=1):
#         size_bytes = os.path.getsize(file_path)
#         size_kb = size_bytes / 1024
#         size_str = f"{size_kb:.1f}KB"

#         file_info = {
#             "name": file_path.name,
#             "size": size_str
#         }
#         file_info_str = json.dumps(file_info, indent=4, ensure_ascii=False)
#         all_file_info_str += f"File {idx}:\n{file_info_str}\n\n"

#     return all_file_info_str

# # Function to add files to workspace
# def add_files(uploaded_files, workspace):
#     os.makedirs(WORKSPACE_DIR, exist_ok=True)
#     if uploaded_files:
#         if not isinstance(uploaded_files, list):
#             uploaded_files = [uploaded_files]
#         for f in uploaded_files:
#             dest_path = os.path.join(WORKSPACE_DIR, os.path.basename(f.name))
#             shutil.copy(f.name, dest_path)
#             if dest_path not in workspace:
#                 workspace.append(dest_path)
#     return workspace

# # Function to clear workspace
# def clear_workspace(workspace):
#     for file_path in workspace:
#         if os.path.exists(file_path):
#             os.remove(file_path)
#     return []

# def get_file_icon(extension):
#     ext = extension.lower()
#     if ext in ['.jpg', '.jpeg', '.png', '.gif', '.bmp']:
#         return '🖼️'  # Image
#     elif ext == '.pdf':
#         return '📕'  # PDF
#     elif ext in ['.doc', '.docx']:
#         return '📘'  # Word
#     elif ext == '.txt':
#         return '📄'  # Plain text
#     elif ext == '.md':
#         return '📝'  # Markdown
#     elif ext in ['.csv', '.xlsx']:
#         return '📊'  # Spreadsheet
#     elif ext in ['.json', '.sqlite']:
#         return '🗄️'  # Data / database
#     elif ext in ['.mp4', '.avi', '.mov']:
#         return '🎥'  # Video
#     elif ext in ['.mp3', '.wav']:
#         return '🎵'  # Audio
#     elif ext in ['.zip', '.rar', '.tar']:
#         return '🗜️'  # Archive
#     else:
#         return '📁'  # Generic

# def render_workspace(workspace):
#     if not workspace:
#         return '<div style="border: 1px solid #ccc; padding: 10px; border-radius: 5px;"><p>No files in workspace.</p></div>'
    
#     file_list = "<ul style='list-style: none; padding: 0; margin: 0;'>"
#     for idx, file in enumerate(workspace):
#         basename = os.path.basename(file)
#         extension = Path(basename).suffix.lower()
#         icon = get_file_icon(extension)
#         bg_color = "#f5f5f5" if idx % 2 == 0 else "#e8e8e8"
        
#         # Download and preview links point to HTTP server
#         file_url = f"{HTTP_SERVER_BASE}/workspace/{basename}"
#         download_link = f'<a href="{file_url}" download="{basename}" style="text-decoration: none; color: #007bff;">Download</a>'
#         preview_link = ''
#         if extension in ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.pdf', '.txt', '.doc', '.docx','.csv', '.xlsx']:
#             preview_link = f'<a href="{file_url}" target="_blank" style="text-decoration: none; color: #28a745; margin-left: 10px;">Preview</a>'
        
#         file_list += f'<li style="padding: 8px; background-color: {bg_color};">{icon} {basename} - {download_link}{preview_link}</li>'
    
#     file_list += "</ul>"
#     return f'<div style="border: 1px solid #ccc; padding: 10px; border-radius: 5px;"><h3 style="margin-top: 0;">Workspace Files:</h3>{file_list}</div>'

# # Function to execute code
# def execute_code(code_str):
#     stdout_capture = io.StringIO()
#     stderr_capture = io.StringIO()
#     try:
#         with contextlib.redirect_stdout(stdout_capture), contextlib.redirect_stderr(stderr_capture):
#             exec(code_str, {})
#         output = stdout_capture.getvalue()
#         if stderr_capture.getvalue():
#             output += stderr_capture.getvalue()
#         return output
#     except Exception as exec_error:
#         code_lines = code_str.splitlines()
#         tb_lines = traceback.format_exc().splitlines()
#         error_line = None
#         for line in tb_lines:
#             if 'File "<string>", line' in line:
#                 try:
#                     line_num = int(line.split(", line ")[1].split(",")[0])
#                     error_line = line_num
#                     break
#                 except (IndexError, ValueError):
#                     continue
#         error_message = f"Traceback (most recent call last):\n"
#         if error_line is not None and 1 <= error_line <= len(code_lines):
#             error_message += f'  File "<string>", line {error_line}, in <module>\n'
#             error_message += f'    {code_lines[error_line-1].strip()}\n'
#         error_message += f"{type(exec_error).__name__}: {str(exec_error)}"
#         if stderr_capture.getvalue():
#             error_message += f"\n{stderr_capture.getvalue()}"
#         return f"[Error]:\n{error_message.strip()}"

# # Function to handle user message
# def user(user_message, history):
#     if user_message:
#         history.append((user_message, None))  # User message as text
#     return "", history

# def fix_code_block(content):
#     def fix_text(text):
#         stack = []
#         lines = text.splitlines(keepends=True)
#         result = []
#         for line in lines:
#             stripped = line.strip()
#             if stripped.startswith("```python"):
#                 if stack and stack[-1] == "```python":
#                     result.append("```\n")
#                     stack.pop()
#                 stack.append("```python")
#                 result.append(line)
#             elif stripped == "```":
#                 if stack and stack[-1] == "```python":
#                     stack.pop()
#                 result.append(line)
#             else:
#                 result.append(line)
#         while stack:
#             result.append("```\n")
#             stack.pop()
#         return "".join(result)

#     if isinstance(content, str):
#         return fix_text(content)
#     elif isinstance(content, tuple):
#         text_part = content[0] if content[0] else ""
#         return (fix_text(text_part), content[1])
#     return content

# def make_color(messages):
#     tag_colors = {
#         "Analyze": "red",
#         "Understand": "blue",
#         "Code": "green",
#         "Execute": "brown",
#         "Answer": "purple",
#     }

#     color_messages = []
#     for item in messages:
#         if item[-1] is not None:
#             if isinstance(item[-1], str):
#                 text = item[-1]
#                 for tag, color in tag_colors.items():
#                     text = text.replace(
#                         f"<{tag}>",
#                         f'<b><span style="color:{color}">&lt;{tag}&gt;</span></b>'
#                     )
#                     text = text.replace(
#                         f"</{tag}>",
#                         f'<b><span style="color:{color}">&lt;/{tag}&gt;</span></b>'
#                     )
#                 item[-1] = text
#             else:
#                 text = item[-1][0]
#                 for tag, color in tag_colors.items():
#                     text = text.replace(
#                         f"<{tag}>",
#                         f'<b><span style="color:{color}">&lt;{tag}&gt;</span></b>'
#                     )
#                     text = text.replace(
#                         f"</{tag}>",
#                         f'<b><span style="color:{color}">&lt;/{tag}&gt;</span></b>'
#                     )
#                 item[-1] = (text, item[-1][1])
#         color_messages.append(item)
#     return color_messages

# # Modified bot function to yield workspace HTML
# def bot(history, workspace):
#     original_cwd = os.getcwd()
#     os.makedirs(WORKSPACE_DIR, exist_ok=True)

#     # Prepare messages from history
#     messages = []
#     for msg in history:
#         if isinstance(msg[0], str) and msg[1] is None:  # User message
#             messages.append({"role": "user", "content": msg[0]})
#         else:  # Assistant message
#             content = msg[1][0] if isinstance(msg[1], tuple) and len(msg[1]) > 0 else (msg[1] if isinstance(msg[1], str) else "")
#             messages.append({"role": "assistant", "content": content})

#     if messages and messages[-1]["role"] == "user":
#         # Format the last user message with instruction and data
#         user_message = messages[-1]["content"]
#         file_info = collect_file_info(WORKSPACE_DIR) if workspace else ""
#         messages[-1]["content"] = f"# Instruction\n{user_message}\n\n# Data\n{file_info}"

#     # Track current workspace files
#     initial_workspace = set(workspace)

#     # Append an empty assistant response to history
#     history.append([None, ""])  # Multimodal: (None, bot_content)

#     finished = False
#     exe_output = None

#     while not finished:
#         # Make the API call with streaming
#         response = client.chat.completions.create(
#             model=MODEL_PATH,
#             messages=messages,
#             temperature=0.0,
#             stream=True,
#             extra_body={
#                 "add_generation_prompt": False,
#                 "stop_token_ids": [151676, 151645],
#                 "max_new_tokens": 32768,
#             }
#         )

#         cur_res = ""

#         for chunk in response:
#             if chunk.choices and chunk.choices[0].delta.content is not None:
#                 delta = chunk.choices[0].delta.content
#                 cur_res += delta
#                 # Update the last history entry
#                 history[-1][-1] = history[-1][1] + delta
#                 _history = deepcopy(history)
#                 _history[-1] = [None, fix_code_block(history[-1][1])]
#                 yield make_color(_history), workspace, render_workspace(workspace)

#             if "</Answer>" in cur_res:
#                 finished = True
#                 break

#         if chunk.choices[0].finish_reason == "stop" and not finished:
#             cur_res += "</Code>"
#             messages.append({"role": "assistant", "content": cur_res})
#             history[-1][-1] += "</Code>"
#             _history = deepcopy(history)
#             _history[-1] = [None, fix_code_block(history[-1][1])]
#             yield make_color(_history), workspace, render_workspace(workspace)

#         # Check for code block after streaming
#         messages.append({"role": "assistant", "content": cur_res})
        
#         if "</Code>" in cur_res and not finished:
#             code_match = re.search(r"<Code>(.*?)</Code>", cur_res, re.DOTALL)
#             if code_match:
#                 code_content = code_match.group(1).strip()
#                 md_match = re.search(r"```(?:python)?(.*?)```", code_content, re.DOTALL)
#                 code_str = md_match.group(1).strip() if md_match else code_content
#                 os.chdir(WORKSPACE_DIR)

#                 # Execute code
#                 exe_output = execute_code(code_str)
#                 messages.append({"role": "execute", "content": f"{exe_output}"})
                
#                 history[-1][-1] += f"\n<Execute>\n```\n{exe_output}\n```\n</Execute>\n"
#                 os.chdir(original_cwd)
                
#                 # Check for new files in workspace
#                 current_files = set([os.path.join(WORKSPACE_DIR, f) for f in os.listdir(WORKSPACE_DIR) if os.path.isfile(os.path.join(WORKSPACE_DIR, f))])
#                 new_files = list(current_files - initial_workspace)
#                 if new_files:
#                     workspace.extend(new_files)
#                     initial_workspace.update(new_files)
                
#                 # Yield updated history, workspace, and rendered workspace HTML
#                 _history = deepcopy(history)
#                 _history[-1] = [None, fix_code_block(history[-1][1])]
#                 yield make_color(_history), workspace, render_workspace(workspace)

#                 os.chdir(original_cwd)

#     os.chdir(original_cwd)
#     yield make_color(history), workspace, render_workspace(workspace)

# with gr.Blocks() as demo:
#     if os.path.exists(WORKSPACE_DIR):
#         shutil.rmtree(WORKSPACE_DIR)
#     # State for workspace files
#     workspace_state = gr.State([])

#     # Main layout: Two columns
#     with gr.Row():
#         # Left column: File upload and workspace display
#         with gr.Column(scale=1):
#             file_upload = gr.File(label="Upload Files", file_count="multiple", height=200)
#             with gr.Row():
#                 upload_btn = gr.Button("上传", scale=0.4)
#                 gr.HTML("<div style='width: 10px;'></div>")
#                 clear_btn = gr.Button("清空", scale=0.4)
            
#             workspace_display = gr.HTML()

#             upload_btn.click(add_files, inputs=[file_upload, workspace_state], outputs=workspace_state).then(
#                 render_workspace, inputs=workspace_state, outputs=workspace_display
#             )
            
#             clear_btn.click(clear_workspace, inputs=workspace_state, outputs=workspace_state).then(
#                 render_workspace, inputs=workspace_state, outputs=workspace_display
#             )

#         # Right column: Chatbot
#         with gr.Column(scale=3):
#             chatbot = gr.Chatbot(height=600)
#             with gr.Row():
#                 with gr.Group():
#                     msg = gr.Textbox(placeholder="Enter your message...", show_label=False, scale=4, container=False)
#                     with gr.Row():
#                         send_btn = gr.Button("Send", scale=1)
#                         clear_chat_btn = gr.Button("Clear", scale=1)
            
#             # Submit on button click: user then bot
#             send_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
#                 bot, [chatbot, workspace_state], [chatbot, workspace_state, workspace_display]
#             )
            
#             clear_chat_btn.click(lambda: [], outputs=chatbot)

#     demo.load(render_workspace, inputs=workspace_state, outputs=workspace_display)

# demo.launch(server_name="0.0.0.0")


# 在整体UI的右边添加一列（action_list）。它整体是一个html组件，用于展示history[-1][1]中的action
# 如果history[-1][1]不是str，text=history[-1][1][0]，否则text=history[-1][1]
# text中包含5种action（tag）：Analyze、Understand、Code、Execute、Answer
# 每个action的内容用对应的tag包起来，例如analyze_context位于<Analyze>... </Analyze>之中，code_context位于<Code>... </Code>之中，以此类推



# 其中：
# 1. 最上面是一个状态框，文本展示“DeepAnalyze Action： {当前text中最后一个动作}...”
# 2. 下面是一个从上到下的pipeline，记录了text从最开始到当前的Action链路
# pipeline中的每个元素是一个矩形，矩形中是文字Action，点击矩形能展开展示对应的action_context（可关闭的）。
# pipeline中的每个元素用箭头连接

# 实现方式：使用类似于render_workspace(workspace)和workspace_display的方式实时更新action_list

# def get_file_icon(extension):
#     if extension in ['.jpg', '.jpeg', '.png', '.gif', '.bmp']:
#         return '🖼️'  # Image icon
#     elif extension in ['.pdf', '.doc', '.docx', '.txt']:
#         return '📄'  # Document icon
#     elif extension in ['.mp4', '.avi', '.mov']:
#         return '🎥'  # Video icon
#     elif extension in ['.mp3', '.wav']:
#         return '🎵'  # Audio icon
#     elif extension in ['.zip', '.rar', '.tar']:
#         return '🗜️'  # Archive icon
#     else:
#         return '📁'  # Generic file icon

# 增加一些csv、md、json、xlsx、sqlite格式的icon



# 修改这段代码，现在这个代码的workspace中的download和preview都有问题。因为我是将前端部署在远程服务器上。
# 现在下载和预览链接都是这个，没法下载和预览http://10.77.110.215:7860/file=workspace/fraud_by_amount.png

# 除了gradio的端口，另外在启动开始的时候单独建立一个8100 端口（python3 -m http.server 8100），预览和下载的路径应该都是http://10.77.110.215:8100/hourly_transactions.png


# import gradio as gr
# import openai
# import json
# import os
# import shutil
# import re
# import io
# import contextlib
# import traceback
# from pathlib import Path
# from copy import deepcopy
# # API endpoint and model path
# API_BASE = "http://localhost:8000/v1"
# MODEL_PATH = "/fs/fast/u2023000922/zhangshaolei/checkpoints/0827.new3.100/"

# # Initialize OpenAI client
# client = openai.OpenAI(
#     base_url=API_BASE,
#     api_key="dummy"  # VLLM typically doesn't require an API key, but set a dummy one
# )

# # Workspace directory
# WORKSPACE_DIR = "workspace"

# # Function to collect file info
# def collect_file_info(directory: str) -> str:
#     all_file_info_str = ""
#     dir_path = Path(directory)

#     files = sorted([f for f in dir_path.iterdir() if f.is_file()])

#     for idx, file_path in enumerate(files, start=1):
#         size_bytes = os.path.getsize(file_path)
#         size_kb = size_bytes / 1024
#         size_str = f"{size_kb:.1f}KB"

#         file_info = {
#             "name": file_path.name,
#             "size": size_str
#         }
#         file_info_str = json.dumps(file_info, indent=4, ensure_ascii=False)
#         all_file_info_str += f"File {idx}:\n{file_info_str}\n\n"

#     return all_file_info_str

# # Function to add files to workspace
# def add_files(uploaded_files, workspace):
#     os.makedirs(WORKSPACE_DIR, exist_ok=True)
#     if uploaded_files:
#         if not isinstance(uploaded_files, list):
#             uploaded_files = [uploaded_files]
#         for f in uploaded_files:
#             dest_path = os.path.join(WORKSPACE_DIR, os.path.basename(f.name))
#             shutil.copy(f.name, dest_path)
#             if dest_path not in workspace:
#                 workspace.append(dest_path)
#     return workspace

# # Function to clear workspace
# def clear_workspace(workspace):
#     for file_path in workspace:
#         if os.path.exists(file_path):
#             os.remove(file_path)
#     return []

# # Function to render workspace files as HTML list with styled rows
# def render_workspace(workspace):
#     if not workspace:
#         return '<div style="border: 1px solid #ccc; padding: 10px; border-radius: 5px;"><p>No files in workspace.</p></div>'
#     file_list = "<ul style='list-style: none; padding: 0; margin: 0;'>"
#     for idx, file in enumerate(workspace):
#         basename = os.path.basename(file)
#         # Alternate background colors
#         bg_color = "#f5f5f5" if idx % 2 == 0 else "#e8e8e8"
#         download_link = f'<a href="/file={file}" download="{basename}" style="text-decoration: none; color: #007bff;">Download</a>'
#         file_list += f'<li style="padding: 8px; background-color: {bg_color};">{basename} - {download_link}</li>'
#     file_list += "</ul>"
#     return f'<div style="border: 1px solid #ccc; padding: 10px; border-radius: 5px;"><h3 style="margin-top: 0;">Workspace Files:</h3>{file_list}</div>'

# # Function to execute code
# def execute_code(code_str):
#     stdout_capture = io.StringIO()
#     stderr_capture = io.StringIO()
#     try:
#         with contextlib.redirect_stdout(stdout_capture), contextlib.redirect_stderr(stderr_capture):
#             exec(code_str, {})
#         output = stdout_capture.getvalue()
#         if stderr_capture.getvalue():
#             output += stderr_capture.getvalue()
#         return output
#     except Exception as exec_error:
#         code_lines = code_str.splitlines()
#         tb_lines = traceback.format_exc().splitlines()
#         error_line = None
#         for line in tb_lines:
#             if 'File "<string>", line' in line:
#                 try:
#                     line_num = int(line.split(", line ")[1].split(",")[0])
#                     error_line = line_num
#                     break
#                 except (IndexError, ValueError):
#                     continue
#         error_message = f"Traceback (most recent call last):\n"
#         if error_line is not None and 1 <= error_line <= len(code_lines):
#             error_message += f'  File "<string>", line {error_line}, in <module>\n'
#             error_message += f'    {code_lines[error_line-1].strip()}\n'
#         error_message += f"{type(exec_error).__name__}: {str(exec_error)}"
#         if stderr_capture.getvalue():
#             error_message += f"\n{stderr_capture.getvalue()}"
#         return f"[Error]:\n{error_message.strip()}"

# # Function to handle user message (append to history without formatting)
# def user(user_message, history):
#     if user_message:
#         history.append({"role": "user", "content": user_message})
#     return "", history

# # Function to check if content ends with an unpaired code block
# def fix_code_block(content):
#     if content.count("```python") % 2 == 1 and content.strip().endswith("```python"):
#         return content + "\n```"
#     return content

# # Function to handle bot response with streaming
# def bot(history, workspace):
#     original_cwd = os.getcwd()
#     os.makedirs(WORKSPACE_DIR, exist_ok=True)
    

#     # Prepare messages from history
#     messages = history[:]  # Copy the history
#     if messages and messages[-1]["role"] == "user":
#         # Format the last user message with instruction and data
#         user_message = messages[-1]["content"]
#         file_info = collect_file_info(WORKSPACE_DIR) if workspace else ""
#         messages[-1]["content"] = f"# Instruction\n{user_message}\n\n# Data\n{file_info}"

#     # Append an empty assistant message to history
#     # history.append({"role": "assistant", "content": ""})

#     finished=False
#     # Track current workspace files
#     initial_workspace = set(workspace)
#     history.append({"role": "assistant", "content": ""})

#     while not finished:
        
#         # Make the API call with streaming using OpenAI client
        
#         response = client.chat.completions.create(
#             model=MODEL_PATH,
#             messages=messages,
#             temperature=0.0,
#             stream=True,
#             stop=["</Code>"]
#         )
        


#         # Parse the streaming response
#         cur_res=""
#         for chunk in response:
#             if chunk.choices and chunk.choices[0].delta.content is not None:
#                 history[-1]["content"] += chunk.choices[0].delta.content
#                 cur_res+=chunk.choices[0].delta.content
#                 _history=deepcopy(history)
#                 _history[-1]["content"] = fix_code_block(history[-1]["content"])
#                 yield _history, workspace


#             if "</Answer>" in cur_res:
#                 print(cur_res)
#                 finished=True
#                 break
#             # print(chunk.choices[0].finish_reason)
#             if chunk.choices[0].finish_reason=="stop" and not finished:
#                 history[-1]["content"]+="</Code>"
#                 cur_res+="</Code>"
#                 print(cur_res)
#                 messages.append({"role": "assistant", "content":cur_res})
#                 yield make_color(history), workspace
#                 # import pdb;pdb.set_trace()
            
            
            
            

#             # import pdb;pdb.set_trace()
#             # Check if response contains </Code>
#             if "</Code>" in messages[-1]["content"] and not finished:
#                 # messages.append({"role": "assistant", "content": history[-1]["content"]})
                
#                 code_match = re.search(r"<Code>(.*?)</Code>", history[-1]["content"], re.DOTALL)
#                 if code_match:
#                     code_content = code_match.group(1).strip()
#                     md_match = re.search(r"```(?:python)?(.*?)```", code_content, re.DOTALL)
#                     code_str = md_match.group(1).strip() if md_match else code_content
#                     os.chdir(WORKSPACE_DIR)
#                     # import pdb;pdb.set_trace()


#                     # import pdb;pdb.set_trace()
#                     # print(code_str)
#                     # Execute code
#                     exe_output = execute_code(code_str)
#                     # print(exe_output)

#                     # messages.append({"role": "assistant", "content": history[-1]["content"]})
#                     # Append execution result to history
#                     messages.append({"role": "execute", "content": f"{exe_output}"})

#                     history[-1]["content"] +=f"\n<Execute>\n{exe_output}\n</Execute>\n"
#                     yield make_color(history), workspace

#                     os.chdir(original_cwd)
#                     # Check for new files in workspace
#                     current_files = set([os.path.join(WORKSPACE_DIR, f) for f in os.listdir(WORKSPACE_DIR) if os.path.isfile(os.path.join(WORKSPACE_DIR, f))])
#                     new_files = list(current_files - initial_workspace)
#                     if new_files:
#                         workspace.extend(new_files)
#                         initial_workspace.update(new_files)


#     os.chdir(original_cwd)
#     yield make_color(history), workspace

# with gr.Blocks() as demo:
#     # State for workspace files
#     workspace_state = gr.State([])

#     # Main layout: Two columns
#     with gr.Row():
#         # Left column: File upload and workspace display
#         with gr.Column(scale=1):
#             # Upper part: File upload and buttons
#             file_upload = gr.File(label="Upload Files", file_count="multiple")
#             with gr.Row():
#                 upload_btn = gr.Button("上传")
#                 gr.HTML("<div style='width: 10px;'></div>")  # Spacer
#                 clear_btn = gr.Button("清空")
            
#             # Lower part: Workspace display
#             workspace_display = gr.HTML()

#             # Event for upload button: Add files and update display
#             upload_btn.click(add_files, inputs=[file_upload, workspace_state], outputs=workspace_state).then(
#                 render_workspace, inputs=workspace_state, outputs=workspace_display
#             )
            
#             # Event for clear button: Clear and update display
#             clear_btn.click(clear_workspace, inputs=workspace_state, outputs=workspace_state).then(
#                 render_workspace, inputs=workspace_state, outputs=workspace_display
#             )

#         # Right column: Chatbot
#         with gr.Column(scale=2):
#             chatbot = gr.Chatbot(type="messages", label="Chatbot")
#             with gr.Row():
#                 with gr.Group():
#                     msg = gr.Textbox(placeholder="Enter your message...", show_label=False, scale=4, container=False)
#                     with gr.Row():
#                         send_btn = gr.Button("Send", scale=1)
#                         gr.HTML("<div style='width: 10px;'></div>")  # Spacer
#                         clear_chat_btn = gr.Button("Clear", scale=1)
            
#             # Submit on button click: user then bot
#             send_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
#                 bot, [chatbot, workspace_state], [chatbot, workspace_state]
#             ).then(
#                 render_workspace, inputs=workspace_state, outputs=workspace_display
#             )
            
#             # Clear chat history
#             clear_chat_btn.click(lambda: [], outputs=chatbot)

#     # Initial render of workspace
#     demo.load(render_workspace, inputs=workspace_state, outputs=workspace_display)

# # demo.launch()
# demo.launch(server_name="0.0.0.0")


# 现在这段代码的chatbot展示不太准确，总是在中间卡住。
# 帮我修改一下history的yield逻辑。
# chatbot换成多模态对话，对话框调大一点。
# 文件上传的组件大小调成固定的（现在上传之后高度会变化）
# workspace展示最好在丰富一些，为不同的文件类型加一些icon，点击可以弹出一个框展示内容


# 修改：
# 1. 对Prompt的修改不要体现在history中，而是在调用API的时候
# 2. send_btn、clear_chat_btn要和msg = gr.Textbox在一个组件中
# 3. Workspace Files:增加一个外框，每一行用不同深浅的灰色交替作为背景（类似文件系统目录的展示方式）
# 4. Parse the streaming response的时候，判断结尾部分是否在代码块中（之前都是```python...```最后有一个不成对的```python），如果是的话，在结尾暂时加入"\n```"来支撑markdown语法


# 修改这段代码，继续完善：
# 1. 将send_btn和clear_chat_btn中间隔开，变成upload_btn，clear_btn这样
# 2. 整体chat的逻辑稍微修改一下：
# 整体还是流式输出，但是到生成"</Code>"之后需要停止。来在workspace中，execute_code执行代码，再把执行结果放到messages中，继续流式输出。
# 整体逻辑可以参考下面这个代码（但是需要流式输出）
# 注意：在每次把执行结果放到messages时，要检测workspace是否有新增的文件，更新前端workspace的内容




# class DeepAnalyzeVLLM:
#     def __init__(self, model_name, api_url="http://localhost:8000/v1/chat/completions", temperature=0.4, top_p=0.95, top_k=200, max_tokens=32768, max_rounds=50):
#         self.model_name = model_name
#         self.api_url = api_url
#         self.temperature = temperature
#         self.top_p = top_p
#         self.top_k = top_k
#         self.max_tokens = max_tokens
#         self.max_rounds = max_rounds

#     def execute_code(self, code_str):
#         stdout_capture = io.StringIO()
#         stderr_capture = io.StringIO()
#         try:
#             with contextlib.redirect_stdout(stdout_capture), contextlib.redirect_stderr(stderr_capture):
#                 exec(code_str, {})
#             output = stdout_capture.getvalue()
#             if stderr_capture.getvalue():
#                 output += stderr_capture.getvalue()
#             return output
#         except Exception as exec_error:
#             code_lines = code_str.splitlines()
#             tb_lines = traceback.format_exc().splitlines()
#             error_line = None
#             for line in tb_lines:
#                 if 'File "<string>", line' in line:
#                     try:
#                         line_num = int(line.split(", line ")[1].split(",")[0])
#                         error_line = line_num
#                         break
#                     except (IndexError, ValueError):
#                         continue
#             error_message = f"Traceback (most recent call last):\n"
#             if error_line is not None and 1 <= error_line <= len(code_lines):
#                 error_message += f'  File "<string>", line {error_line}, in <module>\n'
#                 error_message += f'    {code_lines[error_line-1].strip()}\n'
#             error_message += f"{type(exec_error).__name__}: {str(exec_error)}"
#             if stderr_capture.getvalue():
#                 error_message += f"\n{stderr_capture.getvalue()}"
#             return f"[Error]:\n{error_message.strip()}"

#     def generate(self, prompt, workspace, prompt1=""):
#         original_cwd = os.getcwd()
#         os.chdir(workspace)
#         reasoning = ""
#         messages = []
#         response_message = []
        
#         for attempt in range(5):
#             try:
#                 messages = [
#                     # {"role": "system", "content": "You are a skilled data scientist with long thinking and Python code generation."},
#                     {"role": "user", "content": prompt}
#                 ]
#                 response_message = []

#                 for round_idx in range(self.max_rounds):
#                     # print(f"{attempt}.{round_idx}")
#                     # Prepare the API request payload
#                     payload = {
#                         "model": self.model_name,
#                         "messages": messages,
#                         "temperature": self.temperature,
#                         "top_p": self.top_p,
#                         "top_k": self.top_k,
#                         "max_tokens": self.max_tokens,
#                         "add_generation_prompt":False,
#                         "stop": ["</Code>"]
#                     }
                    
#                     # Send request to vLLM API
#                     response = requests.post(
#                         self.api_url,
#                         headers={"Content-Type": "application/json"},
#                         json=payload
#                     )
#                     response.raise_for_status()  # Raise an error for bad status codes
#                     response_data = response.json()
#                     # import pdb;pdb.set_trace()
                    
#                     # Extract the generated content
#                     ans = response_data["choices"][0]["message"]["content"]

#                     if response_data["choices"][0]['stop_reason']=="</Code>":
#                         ans+="</Code>"

#                     response_message.append(ans)

#                     code_match = re.search(r"<Code>(.*?)</Code>", ans, re.DOTALL)
#                     if not code_match:
#                         print("No <Code> block detected, ending loop.")
#                         break
#                     if "<Answer>" in ans:
#                         print("Generated final answer.")
#                         break

#                     code_content = code_match.group(1).strip()
#                     md_match = re.search(r"```(?:python)?(.*?)```", code_content, re.DOTALL)
#                     code_str = md_match.group(1).strip() if md_match else code_content

#                     exe_output = self.execute_code(code_str)
#                     response_message.append(f"<Execute>\n{exe_output}\n</Execute>")

#                     messages.append({"role": "assistant", "content": ans})
#                     # messages.append({"role": "user", "content": f"# The output after executing the above code is:\n<Execute>\n{exe_output}\n</Execute>"})
#                     messages.append({"role": "execute", "content": exe_output})

#                 reasoning = "\n".join(response_message)
#                 break
#             except Exception as e:
#                 print(f"Attempt {attempt+1} failed: {str(e)}")
#                 reasoning = "\n".join(response_message)
#                 if attempt == 4:  # Last attempt
#                     reasoning += f"\n[Error]: Failed after 5 attempts: {str(e)}"

#         os.chdir(original_cwd)
#         results = {
#             "reasoning": reasoning,
#         }
#         return results