import re
from pathlib import Path
from typing import Tuple, List
from rich.console import Console
from .ai import (
    openai_chat,
    ollama_chat
)
from .help import (
    model_help,
    list_models,
    print_models
)
from .constants import (
    SUPPORTED_APIS,
    MULTILINE_PROMPT,
    MULTILINE_END,
    HELP_COMMANDS,
    CLEAR_COMMANDS,
    QUIT_COMMANDS,
    FILE_TAG_PATTERN,
    MAX_FILE_SIZE,
    ERROR_MESSAGES,
    SUCCESS_MESSAGES,
    USAGE_MESSAGES
)

# Shared Rich console used by every helper in this module for styled output.
console = Console()

def multiline_input() -> str:
    """
    Read lines from stdin until the multiline end marker is entered.

    Returns:
        str: The collected lines joined with newlines, or an empty string
        when input is cancelled with Ctrl-C / Ctrl-D.
    """
    collected: List[str] = []
    console.print('[dim]Enter multiline content (type """ to finish):[/dim]')
    try:
        while True:
            entry = input(MULTILINE_PROMPT)
            if entry == MULTILINE_END:
                break
            collected.append(entry)
    except (KeyboardInterrupt, EOFError):
        # Treat interruption as a cancelled entry rather than propagating.
        console.print('\n[yellow]Multiline input cancelled[/yellow]')
        return ''
    return '\n'.join(collected)

def replace_files_tag(content: str) -> str:
    """
    Expand ``[path]`` file tags in *content* into inline markdown code blocks.

    Every tag matched by FILE_TAG_PATTERN is replaced with the file's name
    followed by its contents in a fenced code block. Tags that point to a
    missing path, a non-file, or an oversized file are left in place and a
    warning is printed instead.

    Args:
        content: Raw user input possibly containing file tags.

    Returns:
        str: The content with all readable file tags expanded.
    """
    files = re.findall(FILE_TAG_PATTERN, content)
    if not files:
        return content
    for file_path in files:
        try:
            file_path_obj = Path(file_path)
            if not file_path_obj.exists():
                console.print(f'[yellow]Warning: {ERROR_MESSAGES["file_not_found"].format(file_path=file_path)}[/yellow]')
                continue
            if not file_path_obj.is_file():
                console.print(f'[yellow]Warning: {ERROR_MESSAGES["not_a_file"].format(file_path=file_path)}[/yellow]')
                continue
            # Skip files over the size cap so a huge file cannot flood the prompt.
            if file_path_obj.stat().st_size > MAX_FILE_SIZE:
                console.print(f'[yellow]Warning: {ERROR_MESSAGES["file_too_large"].format(file_path=file_path)}[/yellow]')
                continue
            file_content = file_path_obj.read_text(encoding='utf-8')
            file_name = file_path_obj.name
            file_extension = file_path_obj.suffix.lstrip('.')
            # Insert the file text verbatim. (The previous unicode_escape pass
            # collapsed the file onto one line and produced backslash sequences
            # that re.sub then misparsed as replacement escapes.)  Ensure the
            # closing fence lands on its own line even when the file has no
            # trailing newline.
            if not file_content.endswith('\n'):
                file_content += '\n'
            replacement = f'`{file_name}`\n\n```{file_extension}\n{file_content}```\n\n'
            # A callable replacement keeps backslashes in the file content
            # literal instead of being interpreted as regex escapes, and the
            # pattern also accepts a tag at end-of-input, not only one
            # followed by a space or newline.
            content = re.sub(
                rf'\[{re.escape(file_path)}\](?:[ \n]|$)',
                lambda _match: replacement,
                content,
            )
        except (IOError, OSError) as e:
            console.print(f'[red]{ERROR_MESSAGES["read_error"].format(file_path=file_path, error=e)}[/red]')
        except Exception as e:
            console.print(f'[red]Unexpected error processing file {file_path}: {e}[/red]')
    return content

def process_chat_input(content: str, api: str, model: str) -> None:
    """
    Expand file tags in the input and dispatch it to the backend for *api*.

    Args:
        content: User input content
        api: API provider name
        model: Model name
    """
    if not content.strip():
        # Nothing to send for blank input.
        return
    expanded = replace_files_tag(content)
    openai_compatible = ('openai', 'deepseek', 'tongyi')
    try:
        if api == 'ollama':
            ollama_chat(content=expanded, model=model)
        elif api in openai_compatible:
            # These providers all speak the OpenAI-style chat API.
            openai_chat(message=expanded, api=api, model=model)
        else:
            console.print(f'[red]Unsupported API: {api}[/red]')
    except KeyboardInterrupt:
        console.print('\n[yellow]Chat interrupted[/yellow]')
    except Exception as e:
        console.print(f'[red]{ERROR_MESSAGES["chat_error"].format(error=e)}[/red]')

def handle_chat_commands(content: str, api: str, model: str) -> Tuple[str, str]:
    """
    Handle special chat commands; anything else is sent as regular chat input.

    Args:
        content: User input content
        api: Current API provider
        model: Current model name

    Returns:
        Tuple[str, str]: Updated (api, model) tuple

    Raises:
        SystemExit: When a quit command is entered.
    """
    content = content.strip()
    # Help commands
    if content in HELP_COMMANDS:
        model_help()
        return api, model
    # Clear screen
    if content in CLEAR_COMMANDS:
        console.clear()
        console.print(f'[grey54]Model using: {api}.{model}[/grey54]')
        return api, model
    # Quit application. SystemExit instead of the site-injected exit()
    # helper, which is not guaranteed to exist outside interactive sessions.
    if content in QUIT_COMMANDS:
        console.print('[bold green]<<<[/bold green] [bold magenta]Bye![/bold magenta]')
        raise SystemExit(0)
    parts = content.split()
    command = parts[0] if parts else ''
    # Match the bare command word too, so "/load" or "/ls" without arguments
    # prints usage instead of being sent to the model as chat text.
    if command == '/load':
        return _handle_load_command(parts, api, model)
    if command in ('/ls', '/list'):
        return _handle_list_command(parts, api, model)
    # Regular chat input
    process_chat_input(content, api, model)
    return api, model


def _handle_load_command(parts: List[str], api: str, model: str) -> Tuple[str, str]:
    """Switch to a new (api, model) pair; keep the current pair on any error."""
    if len(parts) != 3:
        console.print(f'[red]{USAGE_MESSAGES["load_command"]}[/red]')
        return api, model
    new_api, new_model = parts[1], parts[2]
    if new_api not in SUPPORTED_APIS:
        console.print(f'[red]{ERROR_MESSAGES["invalid_api"].format(api=new_api)}[/red]')
        return api, model
    available_models = list_models(new_api)
    if new_model not in available_models:
        console.print(f'[red]{ERROR_MESSAGES["invalid_model"].format(model=new_model)}[/red]')
        console.print(f'[yellow]Available models: {", ".join(available_models)}[/yellow]')
        return api, model
    console.print(f'[green]{SUCCESS_MESSAGES["model_switched"].format(api=new_api, model=new_model)}[/green]')
    return new_api, new_model


def _handle_list_command(parts: List[str], api: str, model: str) -> Tuple[str, str]:
    """Print the models available for the requested API provider."""
    if len(parts) != 2:
        console.print(f'[red]{USAGE_MESSAGES["list_command"]}[/red]')
        return api, model
    target_api = parts[1]
    if target_api not in SUPPORTED_APIS:
        console.print(f'[red]{ERROR_MESSAGES["invalid_api"].format(api=target_api)}[/red]')
        return api, model
    print_models(target_api)
    return api, model
