import asyncio
import json
import os
from typing import Optional

import click
import dotenv
from fastapi.testclient import TestClient
from openai import OpenAI
from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory
from prompt_toolkit.styles import Style
from tree_rag.app import app
from tree_rag.dataclasses.api import WorkflowRequest, WorkflowResponse, WorkflowByAgentRequest
from tree_rag.es import clear_cache as clear_cache_func

# Load environment variables from a local .env file, if present.
dotenv.load_dotenv()

# Synchronous OpenAI client, used to stream the final assistant reply.
# Requires OPENAI_API_BASE and OPENAI_API_KEY to be set (fails fast otherwise).
openai_client = OpenAI(
    base_url=os.environ["OPENAI_API_BASE"],
    api_key=os.environ["OPENAI_API_KEY"],
)

# prompt_toolkit styling for the interactive prompt.
style = Style.from_dict(
    {
        "prompt": "ansicyan bold",
        "input": "ansiwhite",
    }
)

# Persist prompt history in the user's home directory.
# BUG FIX: expanduser() only expands a leading "~"; without it the history
# file silently landed in the current working directory.
history_file = os.path.expanduser("~/.tree_rag_history")
client = TestClient(app)


def chat_loop(
    tenant_id: Optional[str] = None,
    channel_id: Optional[str] = None,
    conversation_id: Optional[str] = None,
    script_path: Optional[str] = None,
    position: Optional[str] = None,
    name: Optional[str] = None,
    use_llm_for_unknown: bool = True,
    use_qa_as_example: bool = True,
    agent_name: Optional[str] = None,
    strict_mode: bool = False,
    tone: str = "standard",
):
    """Interactive chat loop with the RAG system.

    Reads user turns (from ``script_path`` if given, otherwise interactively),
    sends each turn to ``/workflow`` or ``/workflow_by_agent``, streams the
    LLM completion of the returned prompts, and reports the final reply to
    ``/callback``.

    Args:
        tenant_id: Tenant identifier (mapping-file mode).
        channel_id: Channel identifier; together with a missing ``tenant_id``
            it selects the ``/workflow_by_agent`` endpoint.
        conversation_id: Conversation key; defaults to ``"chat-session"``.
        script_path: Optional file of predefined inputs, one per line; once
            exhausted the loop falls back to interactive prompting.
        position: Agent position override (``/workflow`` mode).
        name: Agent name override (``/workflow`` mode).
        use_llm_for_unknown: Let the LLM answer unknown questions.
        use_qa_as_example: Include QA pairs as few-shot examples.
        agent_name: Agent name; selects the ``/workflow_by_agent`` endpoint.
        strict_mode: Enable strict mode (``/workflow_by_agent`` only).
        tone: Reply tone (standard/friendly/patient/trendy).
    """
    print("Starting chat session... (type 'exit' to quit)")
    print("Commands:")
    print("  exit - Exit the chat session")
    print("  clear - Clear the screen")
    print("-" * 50)

    session = PromptSession(
        history=FileHistory(history_file),
        style=style,
        complete_while_typing=True,
    )
    turn_count = 0

    # Pre-load scripted inputs, skipping blank lines.
    inputs = []
    if script_path:
        with open(script_path, "r") as f:
            inputs = [line.strip() for line in f if line.strip()]

    with client:
        while True:
            # Next user turn: scripted inputs first, then interactive prompt.
            if inputs:
                user_input = inputs.pop(0)
                print("You:", user_input)
            else:
                user_input = session.prompt(
                    [("class:prompt", "You: "), ("class:input", "")],
                    style=style,
                ).strip()

            # Handle special commands.
            if user_input.lower() == "exit":
                break
            elif user_input.lower() == "clear":
                os.system("cls" if os.name == "nt" else "clear")
                continue
            elif not user_input:
                continue

            # Reset server-side conversation state only on the first turn.
            reset = turn_count == 0

            # Use workflow_by_agent if agent_name or channel_id is provided
            # without tenant_id; otherwise the original /workflow path.
            if agent_name or (channel_id and not tenant_id):
                request_data = {
                    "agent_name": agent_name,
                    "channel_id": channel_id,
                    "user_input": user_input,
                    "conversation_id": conversation_id or "chat-session",
                    "reset": reset,
                    "use_qa_as_example": use_qa_as_example,
                    "use_llm_for_unknown": use_llm_for_unknown,
                    "strict_mode": strict_mode,
                    "tone": tone,
                }
                response = client.post("/workflow_by_agent", json=request_data)
            else:
                request = WorkflowRequest(
                    tenant_id=tenant_id,
                    channel_id=channel_id,
                    conversation_id=conversation_id or "chat-session",
                    user_input=user_input,
                    reset=reset,
                    agent_name=name,
                    position=position,
                    use_llm_for_unknown=use_llm_for_unknown,
                    use_qa_as_example=use_qa_as_example,
                    tone=tone,
                )
                response = client.post("/workflow", json=request.model_dump())

            # Both branches produce a WorkflowResponse payload.
            response.raise_for_status()
            workflow_response = WorkflowResponse.model_validate(response.json())

            print(workflow_response.graph)

            # Stream the final LLM reply built from the returned prompts.
            print("\nAssistant: ", end="", flush=True)
            full_response = ""
            for chunk in openai_client.chat.completions.create(
                messages=[
                    {"role": "system", "content": workflow_response.system_prompt},
                    {"role": "user", "content": workflow_response.user_prompt},
                ],
                model=os.getenv("MODEL_NAME"),
                stream=True,
            ):
                content = chunk.choices[0].delta.content
                if content:
                    print(content, end="", flush=True)
                    full_response += content
            print("\n" + "-" * 50)

            # Report the final reply so the server can record the turn.
            # NOTE(review): posted as form data (data=), unlike the JSON
            # workflow calls — confirm /callback expects form encoding.
            callback_data = {
                "conversation_id": conversation_id or "chat-session",
                "llm_response": full_response,
            }
            callback_response = client.post("/callback", data=callback_data)
            callback_response.raise_for_status()
            turn_count += 1


@click.command()
@click.option("--mapping-file", "-m", help="Path to mapping file")
@click.option("--tenant-name", "-t", help="Tenant name")
@click.option("--conversation-id", "-c", help="Conversation ID (optional)")
@click.option(
    "--script",
    "-s",
    help="Path to script file with predefined inputs",
    type=click.Path(exists=True),
)
@click.option("--clear-cache", "-cc", help="Clear the redis cache", is_flag=True)
@click.option(
    "--use-llm-for-unknown",
    "-u",
    help="Use LLM for unknown questions",
    is_flag=True,
    default=False,
)
@click.option(
    "--use-qa-as-example",
    "-q",
    help="Use QA pairs as examples",
    is_flag=True,
    default=True,
)
@click.option("--position", "-p", help="Position override (default from mapping file)")
@click.option("--name", "-n", help="Agent name override (default from mapping file)")
@click.option("--agent-name", "-a", help="Agent name for workflow_by_agent (alternative to mapping file)")
@click.option("--channel-id", help="Channel ID for workflow_by_agent (alternative to mapping file)")
@click.option("--strict-mode", help="Enable strict mode", is_flag=True, default=False)
@click.option("--tone", help="Reply tone (standard/friendly/patient/trendy)", default="standard")
def main(
    mapping_file: Optional[str],
    tenant_name: Optional[str],
    conversation_id: Optional[str],
    script: Optional[str],
    clear_cache: bool = False,
    # Signature defaults aligned with the click option defaults; click always
    # supplies every option, so these never apply at runtime, but a mismatch
    # (the old `use_llm_for_unknown=True`) was misleading to readers.
    use_llm_for_unknown: bool = False,
    use_qa_as_example: bool = True,
    position: Optional[str] = None,
    name: Optional[str] = None,
    agent_name: Optional[str] = None,
    channel_id: Optional[str] = None,
    strict_mode: bool = False,
    tone: str = "standard",
):
    """Interactive chat with RAG system.

    Two modes: pass --agent-name/--channel-id to use the workflow_by_agent
    endpoint directly, or pass --mapping-file with --tenant-name to resolve
    tenant/channel/position/name from a JSON mapping file.
    """
    # workflow_by_agent mode takes precedence over the mapping-file mode.
    if agent_name or channel_id:
        if mapping_file or tenant_name:
            print("Warning: mapping file and tenant name options are ignored when using agent_name or channel_id")

        if clear_cache:
            asyncio.run(clear_cache_func())

        chat_loop(
            conversation_id=conversation_id,
            script_path=script,
            use_llm_for_unknown=use_llm_for_unknown,
            use_qa_as_example=use_qa_as_example,
            agent_name=agent_name,
            channel_id=channel_id,
            strict_mode=strict_mode,
            tone=tone,
        )
        return

    # Original mapping-file mode: both options are required here.
    if not mapping_file or not tenant_name:
        raise ValueError("Either mapping file and tenant name, or agent_name/channel_id must be provided")

    # Load the tenant mapping and resolve the requested tenant.
    with open(mapping_file, "r") as f:
        mapping = json.load(f)

    if tenant_name not in mapping:
        raise ValueError(f"Tenant '{tenant_name}' not found in mapping file")

    tenant_id = mapping[tenant_name]["tenant_id"]
    channel_id_from_mapping = mapping[tenant_name]["channel_id"]

    # Command-line overrides win; otherwise fall back to mapping-file values,
    # then to the hard-coded defaults.
    agent_position = position or mapping[tenant_name].get("position", "广东省广州市")
    agent_name_from_mapping = name or mapping[tenant_name].get("name", "小智")

    if clear_cache:
        asyncio.run(clear_cache_func())

    chat_loop(
        tenant_id=tenant_id,
        channel_id=channel_id_from_mapping,
        conversation_id=conversation_id,
        script_path=script,
        position=agent_position,
        name=agent_name_from_mapping,
        use_llm_for_unknown=use_llm_for_unknown,
        use_qa_as_example=use_qa_as_example,
        tone=tone,
    )


# Script entry point: delegate to the click-decorated CLI command.
if __name__ == "__main__":
    main()