"""
Utility functions for Qwen3 client
"""

import os
import logging
import re
from pathlib import Path
from typing import Optional


def setup_logging(level: str = "INFO", log_file: Optional[str] = None) -> logging.Logger:
    """
    Setup logging for Qwen3 client.

    Args:
        level: Logging level name (DEBUG, INFO, WARNING, ERROR), case-insensitive.
            Unknown names fall back to INFO instead of raising AttributeError.
        log_file: Optional log file path; when given, a UTF-8 file handler is
            added in addition to the console handler.

    Returns:
        Configured "qwen3_client" logger with exactly the handlers added here.
    """
    logger = logging.getLogger("qwen3_client")

    # getattr(logging, name) can raise AttributeError for unknown names or
    # return a non-int attribute (e.g. "FORMATTER" -> the Formatter class);
    # only accept a real numeric level, otherwise default to INFO.
    numeric_level = getattr(logging, level.upper(), None)
    if not isinstance(numeric_level, int) or isinstance(numeric_level, bool):
        numeric_level = logging.INFO
    logger.setLevel(numeric_level)

    # Drop handlers from previous calls so repeated setup does not
    # duplicate log output.
    logger.handlers.clear()

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Console handler (stderr by default for StreamHandler).
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Optional file handler.
    if log_file:
        file_handler = logging.FileHandler(log_file, encoding='utf-8')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    return logger


def validate_api_key(api_key: str) -> bool:
    """
    Validate API key format.

    Args:
        api_key: API key to validate.

    Returns:
        True if the key is a string with at least 10 non-whitespace-padded
        characters, False otherwise (including None / non-string input).
    """
    # Reject anything that is not a string (covers None as well).
    if not isinstance(api_key, str):
        return False

    # Measure the stripped key: a short key padded with whitespace to 10+
    # raw characters must not pass (the raw-length check previously did).
    stripped = api_key.strip()

    # API keys are usually long; require a reasonable minimum length.
    return len(stripped) >= 10


def load_env_file(env_path: Optional[str] = None) -> bool:
    """
    Load environment variables from .env file(s) via python-dotenv.

    When *env_path* is given, only that file is loaded (overriding any
    already-set environment variables) and the search below is skipped.
    Otherwise, candidates are tried in this order:
      1. ``<package_root>/.env`` (no override of existing env vars)
      2. the first ``.env`` found walking up from the current working
         directory (loaded with override)
      3. any paths listed in the ``QWEN_ENV_PATHS`` environment variable,
         separated by ``os.pathsep`` (no override)
      4. ``semantic-rag-py/.env`` under the cwd, each of its parents, or
         the package root (no override)

    Args:
        env_path: Explicit path to a .env file (optional).

    Returns:
        True if at least one .env file was found and loaded, False if none
        was found or python-dotenv is not installed.
    """
    try:
        from dotenv import load_dotenv

        loaded = False
        # Resolved paths already loaded; prevents loading the same file
        # twice when several search roots point at it.
        seen_paths = set()

        def try_load(path: Path, override: bool = False) -> None:
            # Load *path* if it exists and was not loaded before.
            # NOTE: non-existent paths are NOT added to seen_paths, so a
            # later candidate resolving to the same (now existing) file
            # would still be attempted.
            nonlocal loaded
            if not path:
                return
            resolved = path.expanduser().resolve()
            if resolved in seen_paths:
                return
            if resolved.exists():
                load_dotenv(resolved, override=override)
                seen_paths.add(resolved)
                loaded = True

        # Explicit path short-circuits the whole search.
        if env_path:
            try_load(Path(env_path), override=True)
            return loaded

        # Package root = parent of the directory containing this module.
        package_root = Path(__file__).resolve().parent.parent
        try_load(package_root / ".env")

        current_dir = Path.cwd()

        # Walk up from the cwd; stop at the FIRST .env found. Loaded with
        # override so project-local values win over the package .env.
        for parent in [current_dir] + list(current_dir.parents):
            candidate = parent / ".env"
            if candidate.exists():
                try_load(candidate, override=True)
                break

        # Extra user-supplied locations, e.g. "a/.env:b/.env" on POSIX.
        extra_paths = os.getenv("QWEN_ENV_PATHS", "")
        if extra_paths:
            for raw_path in extra_paths.split(os.pathsep):
                raw_path = raw_path.strip()
                if raw_path:
                    try_load(Path(raw_path))

        # Known sibling project(s) whose .env we also want to pick up.
        sibling_names = {"semantic-rag-py"}
        for base_dir in [current_dir] + list(current_dir.parents) + [package_root]:
            for sibling in sibling_names:
                candidate = base_dir / sibling / ".env"
                try_load(candidate)

        return loaded

    except ImportError:
        # python-dotenv not installed: treat as "nothing loaded".
        return False


def sanitize_filename(filename: str, max_length: int = 50) -> str:
    """
    Sanitize filename for safe file operations.

    Args:
        filename: Original filename.
        max_length: Maximum length for the result.

    Returns:
        Sanitized filename: filesystem-reserved characters and whitespace
        runs become underscores, the result is trimmed of edge underscores
        (including after truncation) and is never empty.
    """
    # Replace characters invalid on common filesystems with underscores.
    safe_filename = re.sub(r'[<>:"/\\|?*]', '_', filename)

    # Collapse any whitespace run into a single underscore.
    safe_filename = re.sub(r'\s+', '_', safe_filename)

    # Trim leading/trailing underscores (no whitespace can remain after
    # the substitution above).
    safe_filename = safe_filename.strip('_')

    # Enforce the length limit; re-trim so truncation cannot leave a
    # dangling trailing underscore.
    if len(safe_filename) > max_length:
        safe_filename = safe_filename[:max_length].rstrip('_')

    # Never return an empty name.
    if not safe_filename:
        safe_filename = "unnamed"

    return safe_filename


def extract_keywords_from_text(text: str, max_keywords: int = 10) -> list:
    """
    Extract keywords from text by simple frequency ranking.

    Args:
        text: Input text.
        max_keywords: Maximum number of keywords to return.

    Returns:
        List of up to max_keywords lowercase keywords, most frequent first;
        ties keep first-occurrence order (stable sort over insertion order).
    """
    # Runs of 3+ ASCII letters, lowercased.
    words = re.findall(r'\b[A-Za-z]{3,}\b', text.lower())

    # Filter common words
    stop_words = {
        'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with',
        'by', 'from', 'up', 'about', 'into', 'through', 'during', 'before',
        'after', 'above', 'below', 'between', 'among', 'throughout', 'despite',
        'towards', 'upon', 'concerning', 'that', 'this', 'these', 'those',
        'they', 'them', 'their', 'there', 'then', 'than', 'when', 'where',
        'why', 'how', 'what', 'which', 'who', 'whom', 'whose', 'whether',
        'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing',
        'will', 'would', 'could', 'should', 'may', 'might', 'must', 'can',
        'been', 'being', 'are', 'was', 'were', 'am', 'is', 'said', 'say',
        'each', 'time', 'other', 'some', 'many', 'most', 'more', 'also',
        'very', 'just', 'only', 'even', 'much', 'such', 'like', 'well'
    }

    # NOTE(review): len(word) > 3 drops 3-letter words even though the
    # regex above admits them — preserved as-is; confirm intent.
    eligible = {word for word in words if word not in stop_words and len(word) > 3}

    # Count frequencies using O(1) set membership (previously an O(k)
    # list lookup per word, i.e. O(n*k) overall).
    word_freq = {}
    for word in words:
        if word in eligible:
            word_freq[word] = word_freq.get(word, 0) + 1

    # Sort by frequency (descending) and return the top keywords.
    ranked = sorted(word_freq.items(), key=lambda item: item[1], reverse=True)
    return [word for word, _ in ranked[:max_keywords]]


def format_response_for_display(response: str, max_width: int = 80) -> str:
    """
    Format response text for better display.

    Lines that already fit are kept verbatim; longer lines are re-wrapped
    at word boundaries to at most max_width characters (a single word
    longer than max_width stays on its own overlong line).

    Args:
        response: Response text from Qwen3.
        max_width: Maximum line width.

    Returns:
        Formatted response text.
    """
    wrapped = []

    for raw_line in response.split('\n'):
        # Short lines pass through untouched.
        if len(raw_line) <= max_width:
            wrapped.append(raw_line)
            continue

        # Greedy word wrap; `used` budgets one separator per word, matching
        # the original accounting exactly.
        buffer, used = [], 0
        for token in raw_line.split():
            if used + len(token) + 1 <= max_width:
                buffer.append(token)
                used += len(token) + 1
            else:
                if buffer:
                    wrapped.append(' '.join(buffer))
                buffer, used = [token], len(token)

        if buffer:
            wrapped.append(' '.join(buffer))

    return '\n'.join(wrapped)


def estimate_tokens(text: str) -> int:
    """
    Estimate token count for text (rough approximation).

    Args:
        text: Input text.

    Returns:
        Estimated token count.
    """
    # Heuristic: roughly 4 characters per token for mixed
    # English/Chinese text.
    chars_per_token = 4
    return len(text) // chars_per_token


def truncate_text_to_tokens(text: str, max_tokens: int) -> str:
    """
    Truncate text to an approximate token limit.

    Uses the same ~4 characters-per-token heuristic as estimate_tokens()
    and, where possible, cuts at the latest sentence boundary so the
    result ends cleanly.

    Args:
        text: Input text.
        max_tokens: Maximum number of tokens; values <= 0 yield "".

    Returns:
        Text truncated to roughly max_tokens tokens. When no sentence
        boundary falls within the last ~20% of the budget, the hard-cut
        text is returned with a trailing "...".
    """
    # A non-positive budget means nothing fits (previously this fell
    # through and returned just "...").
    if max_tokens <= 0:
        return ""

    estimated_chars = max_tokens * 4

    if len(text) <= estimated_chars:
        return text

    truncated = text[:estimated_chars]

    # Find the LAST sentence-ending delimiter across ALL candidates
    # (English and CJK punctuation). The previous loop returned on the
    # first delimiter TYPE that cleared the threshold, which could pick
    # an earlier boundary than a later match of another delimiter.
    best_pos, best_len = -1, 0
    for delimiter in ('. ', '。', '! ', '！', '? ', '？'):
        pos = truncated.rfind(delimiter)
        if pos > best_pos:
            best_pos, best_len = pos, len(delimiter)

    # Only honor the boundary if it keeps at least ~80% of the budget.
    if best_pos > estimated_chars * 0.8:
        return truncated[:best_pos + best_len]

    # No good sentence boundary found: hard truncate.
    return truncated + "..."
