"""
Performance optimization utilities for the DingTalk Card Sender
"""
import time
import functools
from typing import Callable, Any
from fastapi import Response
import hashlib


def cache_result(ttl: int = 300):
    """
    Decorator that caches an async function's results for ``ttl`` seconds.

    The cache key is built from the function name plus the stringified
    positional args and sorted keyword args, so arguments must have a
    stable ``str()`` form.  The cache is bounded: once it exceeds 100
    entries, expired entries are purged and, if still over the limit, the
    oldest entries are evicted.  (Previously only expired entries were
    removed, so a cache full of live entries could grow without bound.)

    Args:
        ttl: Time-to-live for cached results, in seconds.
    """
    def decorator(func: Callable) -> Callable:
        cache: dict = {}
        max_entries = 100  # hard cap on cached results per function

        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            # Sorting kwargs makes the key insensitive to keyword order.
            key = f"{func.__name__}:{str(args)}:{str(sorted(kwargs.items()))}"
            current_time = time.time()

            # Serve a fresh-enough cached result if we have one.
            cached = cache.get(key)
            if cached is not None:
                result, timestamp = cached
                if current_time - timestamp < ttl:
                    return result

            # Execute the wrapped coroutine and cache its result.
            result = await func(*args, **kwargs)
            cache[key] = (result, current_time)

            # Keep the cache bounded: drop expired entries first, then the
            # oldest surviving entries if we are still over the cap.
            if len(cache) > max_entries:
                expired = [
                    k for k, (_, t) in cache.items()
                    if current_time - t >= ttl
                ]
                for k in expired:
                    del cache[k]
                while len(cache) > max_entries:
                    oldest = min(cache, key=lambda k: cache[k][1])
                    del cache[oldest]

            return result

        return wrapper
    return decorator


def compress_response(response: Response):
    """
    Compress response data where possible.

    Currently a no-op placeholder: actual compression (e.g. gzip) would
    normally be applied by ASGI middleware rather than per handler.

    Args:
        response: The response object that would be compressed.
    """
    # This would normally integrate with middleware to compress response data
    # For now, this is a placeholder
    pass


def add_cache_headers(response: Response, cache_duration: int = 300):
    """
    Attach HTTP caching headers to a response.

    Sets ``Cache-Control: public, max-age=<cache_duration>`` and a matching
    ``Expires`` header computed from the current time in GMT.

    Args:
        response: The response whose headers are mutated in place.
        cache_duration: How long (seconds) clients may cache the response.
    """
    expires_at = time.gmtime(time.time() + cache_duration)
    response.headers["Cache-Control"] = f"public, max-age={cache_duration}"
    response.headers["Expires"] = time.strftime(
        "%a, %d %b %Y %H:%M:%S GMT", expires_at
    )


def etag_middleware(response_data: str) -> str:
    """
    Compute a hex MD5 digest of the response body for use as an ETag.

    MD5 is used purely as a content fingerprint here, not as a security
    primitive.

    Args:
        response_data: The serialized response body.

    Returns:
        A 32-character lowercase hex digest.
    """
    digest = hashlib.md5(response_data.encode())
    return digest.hexdigest()


# Performance monitoring utilities
class PerformanceMonitor:
    """
    Collects wall-clock timings for decorated async callables.

    Timings are grouped under a caller-supplied metric name; failed calls
    are logged but intentionally not recorded in the metrics.
    """

    def __init__(self):
        # Maps metric name -> list of execution times (seconds).
        self.metrics = {}

    def measure(self, name: str):
        """
        Return a decorator that times each call of an async function.

        Successful call durations are appended under ``name``; durations
        above 100 ms are printed as slow (development aid).  Exceptions are
        re-raised after logging and do not contribute to the metrics.

        Args:
            name: Metric bucket the timings are recorded under.
        """
        def decorator(func: Callable) -> Callable:
            @functools.wraps(func)
            async def timed(*args, **kwargs):
                started = time.perf_counter()
                try:
                    outcome = await func(*args, **kwargs)
                    elapsed = time.perf_counter() - started
                    self.metrics.setdefault(name, []).append(elapsed)
                    if elapsed > 0.1:  # 100ms development threshold
                        print(f"SLOW FUNCTION: {name} took {elapsed:.3f}s")
                    return outcome
                except Exception as e:
                    elapsed = time.perf_counter() - started
                    print(f"ERROR in {name} after {elapsed:.3f}s: {str(e)}")
                    raise
            return timed
        return decorator

    def get_metrics(self) -> dict:
        """
        Summarize collected timings.

        Returns:
            Mapping of metric name to a dict with ``count``, ``total_time``,
            ``avg_time``, ``min_time`` and ``max_time``; empty if nothing
            has been recorded.
        """
        summary = {}
        for name, samples in self.metrics.items():
            total = sum(samples)
            summary[name] = {
                "count": len(samples),
                "total_time": total,
                "avg_time": total / len(samples),
                "min_time": min(samples),
                "max_time": max(samples),
            }
        return summary

    def reset_metrics(self):
        """Discard every timing collected so far."""
        self.metrics = {}


# Module-level singleton: import this shared monitor rather than creating
# one monitor per module, so all metrics accumulate in a single place.
perf_monitor = PerformanceMonitor()


def optimize_json_serialization():
    """
    Return a ``(dumps, loads)`` pair of JSON serialization callables.

    Uses the third-party ``orjson`` library when installed (much faster
    than the stdlib), otherwise falls back to the standard ``json``
    module.  In both cases ``dumps`` returns ``str`` and ``loads`` accepts
    ``str``.

    Returns:
        A ``(dumps, loads)`` tuple of callables.
    """
    # Bug fix: the import must be guarded — previously an unconditional
    # `import orjson` at the top of the function raised ImportError before
    # the try/except fallback below could ever run.
    try:
        import orjson
    except ImportError:
        import json
        return json.dumps, json.loads

    def dumps_optimized(obj) -> str:
        # orjson emits bytes; decode so callers always receive str.
        return orjson.dumps(obj).decode('utf-8')

    def loads_optimized(s: str):
        return orjson.loads(s)

    return dumps_optimized, loads_optimized


# Frontend performance optimization
def minify_js_code(js_code: str) -> str:
    """
    Naive regex-based JavaScript minification.

    Strips ``//`` and ``/* */`` comments, collapses runs of whitespace to a
    single space, and removes spacing around a few punctuation characters.

    NOTE(review): regex stripping is unsafe for string literals that contain
    '//' or '/*' — production code should use a proper minifier.

    Args:
        js_code: Raw JavaScript source.

    Returns:
        The minified source, stripped of leading/trailing whitespace.
    """
    import re

    # Order matters: remove comments first, then collapse whitespace.
    stripped = re.sub(r'//.*?$', '', js_code, flags=re.MULTILINE)
    stripped = re.sub(r'/\*.*?\*/', '', stripped, flags=re.DOTALL)
    collapsed = re.sub(r'\s+', ' ', stripped)
    tightened = re.sub(r'\s*([{};:,>+])\s*', r'\1', collapsed)
    return tightened.strip()


def bundle_resources(file_paths: list) -> str:
    """
    Bundle multiple resource files into a single string.

    Each file's content is wrapped with a ``/* BUNDLED: <path> */`` banner.
    Missing files are skipped with a printed warning (best-effort, matching
    the original behavior).

    Args:
        file_paths: Paths of the text files to concatenate, in order.

    Returns:
        The concatenated, banner-separated contents.
    """
    parts = []
    for path in file_paths:
        # Keep the try body narrow: only the I/O can raise FileNotFoundError.
        try:
            with open(path, 'r', encoding='utf-8') as f:
                content = f.read()
        except FileNotFoundError:
            print(f"Warning: File not found: {path}")
            continue
        parts.append(f"\n/* BUNDLED: {path} */\n{content}\n")
    # join avoids the quadratic cost of repeated string += in a loop.
    return "".join(parts)