"""
通用流式响应工具 - 基于Linus设计哲学
"好代码没有特殊情况" - 统一所有流式响应模式
"""

import asyncio
import random
import time
from typing import AsyncGenerator, Dict, Any, Optional, List
from gua import StreamResponse, SSEResponse


class StreamBuilder:
    """Linus-style streaming response builder - eliminates duplicated code.

    Factory for the streaming patterns used across the demo modules:
    plain chunked streams, SSE event streams, simulated AI chat streams,
    file downloads, and buffer-size demonstrations.
    """

    def __init__(self):
        # Headers that disable client/proxy caching and buffering so each
        # chunk reaches the client as soon as it is generated.
        # "x-accel-buffering: no" tells nginx not to buffer the response.
        self.default_headers = {
            "cache-control": "no-cache, no-store, must-revalidate",
            "pragma": "no-cache",
            "expires": "0",
            "connection": "keep-alive",
            "x-accel-buffering": "no",
        }

    def create_simple_stream(
        self,
        data_count: int = 10,
        delay: float = 0.5,
        data_prefix: str = "data",
        headers: Optional[Dict[str, str]] = None
    ) -> StreamResponse:
        """Create a simple line-per-chunk data stream.

        Args:
            data_count: Number of chunks to emit.
            delay: Seconds to wait between chunks.
            data_prefix: Prefix of each newline-terminated chunk
                ("<prefix>_<index>").
            headers: Extra headers merged over the anti-buffering defaults
                (caller values win on conflict).

        Returns:
            A StreamResponse yielding `data_count` text chunks.
        """
        final_headers = self.default_headers.copy()
        if headers:
            final_headers.update(headers)

        async def generate():
            for i in range(data_count):
                yield f"{data_prefix}_{i}\n"
                # asyncio.sleep() already yields control to the event loop,
                # so no extra sleep(0) "flush" is needed after it.
                await asyncio.sleep(delay)

        return StreamResponse(generate(), headers=final_headers)

    def create_sse_events(
        self,
        events: Optional[List[Dict[str, Any]]] = None,
        custom_generator: Optional[AsyncGenerator] = None
    ) -> SSEResponse:
        """Create an SSE event stream - unifies all SSE patterns.

        Args:
            events: Event dicts to replay with a 0.5 s gap between them.
                Falls back to a built-in demo sequence when omitted/empty.
            custom_generator: If given, used verbatim as the event source
                and `events` is ignored.

        Returns:
            An SSEResponse over the chosen event source.
        """
        if custom_generator:
            return SSEResponse(custom_generator)

        # Default demo event sequence.
        default_events = [
            {"event": "connect", "data": {"message": "连接建立"}},
            {"event": "message", "data": "事件1", "id": "event_1"},
            {"event": "message", "data": "事件2", "id": "event_2"},
            {"event": "random", "data": {"random": random.random(), "timestamp": time.time()}},
            {"event": "end", "data": {"total": 3, "message": "事件流结束"}}
        ]

        events = events or default_events

        async def generate_events():
            for event in events:
                yield event
                await asyncio.sleep(0.5)

        return SSEResponse(generate_events())

    def create_ai_chat_stream(
        self,
        response_text: str,
        thinking_time: float = 1.0,
        typing_delay_range: tuple = (0.05, 0.3)
    ) -> SSEResponse:
        """Create an AI chat stream that types out `response_text` char by char.

        Args:
            response_text: Full reply to stream, one character per event.
            thinking_time: Seconds of simulated "thinking" before typing starts.
            typing_delay_range: (min, max) seconds of random delay per character.

        Returns:
            An SSEResponse emitting start / chunk / end message events.
        """

        async def generate_chat():
            # Start event announces the thinking phase.
            yield {
                "event": "message",
                "data": {"thinking": "AI正在思考...", "type": "start"}
            }

            # Simulated thinking pause.
            await asyncio.sleep(thinking_time)

            # Character-by-character output with a simple randomized delay
            # (replaces the old 38-line delay-curve function).
            for i, char in enumerate(response_text):
                delay = random.uniform(*typing_delay_range)

                # Occasionally insert a longer pause (~30% chance every
                # 10th character) to mimic natural typing rhythm.
                if i > 0 and i % 10 == 0 and random.random() > 0.7:
                    delay = random.uniform(0.5, 1.2)

                await asyncio.sleep(delay)

                yield {
                    "event": "message",
                    "data": {
                        "content": char,
                        "index": i,
                        "total": len(response_text),
                        "type": "chunk"
                    }
                }

            # End event after a final short pause.
            await asyncio.sleep(random.uniform(0.3, 0.8))
            yield {
                "event": "message",
                "data": {"type": "end", "total_chars": len(response_text)}
            }

        return SSEResponse(generate_chat())

    def create_file_download_stream(
        self,
        filename: str,
        file_size: str = "medium",
        content_type: str = "text/plain"
    ) -> StreamResponse:
        """Create a simulated file-download stream.

        Args:
            filename: Name advertised in the content-disposition header.
            file_size: One of "small" / "medium" / "large"; unknown values
                fall back to "medium".
            content_type: MIME type for the content-type header.

        Returns:
            A StreamResponse that generates fake file content line by line.
        """

        # Unified size presets: line count, per-line delay, line prefix.
        size_configs = {
            "small": {"lines": 20, "sleep": 0.01, "prefix": "小文件"},
            "medium": {"lines": 100, "sleep": 0.001, "prefix": "中等文件"},
            "large": {"lines": 1000, "sleep": 0.0001, "prefix": "大文件"}
        }

        config = size_configs.get(file_size, size_configs["medium"])

        # BUGFIX: the filename parameter was previously ignored and a literal
        # placeholder was sent instead.
        # NOTE(review): non-ASCII filenames may need RFC 6266 quoting/encoding
        # (filename*=UTF-8''...) - confirm against client requirements.
        headers = {
            "content-type": content_type,
            "content-disposition": f"attachment; filename={filename}"
        }

        async def generate_file():
            for i in range(config["lines"]):
                if file_size == "large":
                    chunk = f"{config['prefix']}第 {i+1} 行，包含更多数据: " + "x" * 100 + "\n"
                else:
                    chunk = f"{config['prefix']}第 {i+1} 行数据，时间戳: {time.time()}\n" * 10

                yield chunk
                await asyncio.sleep(config["sleep"])

        return StreamResponse(generate_file(), headers=headers)

    def create_buffer_demo_stream(
        self,
        buffer_size: int,
        demo_name: str = "缓冲演示"
    ) -> StreamResponse:
        """Create a demo stream that exercises real buffering behavior.

        Args:
            buffer_size: Response buffer size in bytes; 0 means unbuffered
                (each chunk is flushed immediately).
            demo_name: Title printed in the stream header.

        Returns:
            A StreamResponse constructed with `buffer_size`, emitting 12
            timestamped data chunks plus a summary of the expected effect.
        """

        # Chunk size is derived from the buffer size so the buffering
        # mechanism is actually exercised rather than bypassed.
        if buffer_size == 0:
            chunk_size = 512  # unbuffered: small chunks, sent immediately
            description = "零缓冲模式：每个数据块立即发送"
            expected_behavior = "每个数据块都会立即触发网络传输"
        else:
            # Buffered: four chunks fill the buffer, forcing periodic flushes.
            chunk_size = buffer_size // 4
            description = f"缓冲模式：每4个数据块填满缓冲区({buffer_size}字节)"
            expected_behavior = "每4个数据块触发一次网络传输，减少网络开销"

        async def generate_demo():
            yield f"🎯 {demo_name} (buffer_size={buffer_size})\n"
            yield f"📊 {description}\n"
            yield f"📦 数据块大小: {chunk_size}字节\n"
            yield f"🎯 预期行为: {expected_behavior}\n"
            yield "=" * 60 + "\n\n"

            # Record generation timestamps to expose real network latency.
            send_times = []
            start_time = time.time()

            # 12 chunks guarantee several full buffer cycles (12 / 4 = 3).
            for i in range(12):
                current_time = time.time()
                elapsed_ms = (current_time - start_time) * 1000

                send_times.append(current_time)

                # Pad with real content so each chunk approaches chunk_size.
                data_content = f"数据块{i+1}" + "x" * (chunk_size - 20)
                data_chunk = f"█[数据块{i+1}] 时间戳: {current_time:.3f}s (生成延迟: {elapsed_ms:.1f}ms)█\n"
                data_chunk += data_content + "\n"

                yield data_chunk

                if buffer_size == 0:
                    # 5 ms: long enough for the immediate send to complete.
                    await asyncio.sleep(0.005)
                else:
                    # 2 ms: generate fast so buffering dominates timing.
                    await asyncio.sleep(0.002)

            total_time = (time.time() - start_time) * 1000
            yield "=" * 60 + "\n"
            yield f"✅ 演示完成！总耗时: {total_time:.1f}ms\n"

            # Summarize the expected buffering effect for the reader.
            if buffer_size == 0:
                yield "💡 零缓冲分析: 每个数据块都应立即触发网络传输\n"
                yield "📈 预期网络传输次数: 12次 (每块一次)\n"
                yield "⚡ 响应性: 最高延迟 <5ms\n"
                yield "⚖️ 适用场景: 实时聊天、游戏、股票交易\n"
            else:
                yield "💡 缓冲分析: 每4个数据块应触发一次网络传输\n"
                yield "📈 预期网络传输次数: 3次 (每4块一次)\n"
                yield "🚀 缓冲优化: 网络传输次数减少75% (12→3次)\n"
                yield "⚡ 响应性: 最高延迟 <10ms × 4 = <40ms\n"

                if buffer_size <= 4096:
                    yield "⚖️ 适用场景: 低延迟API响应、实时仪表板\n"
                elif buffer_size <= 16384:
                    yield "⚖️ 适用场景: 一般Web应用、文件下载\n"
                else:
                    yield "⚖️ 适用场景: 大文件传输、视频流、批量数据处理\n"

        return StreamResponse(generate_demo(), buffer_size=buffer_size)


# Module-level shared instance (de-facto singleton; direct instantiation is still possible)
stream_builder = StreamBuilder()


def create_simple_stream(*args, **kwargs) -> StreamResponse:
    """Module-level shortcut delegating to the shared StreamBuilder instance."""
    builder = stream_builder
    return builder.create_simple_stream(*args, **kwargs)


def create_sse_events(*args, **kwargs) -> SSEResponse:
    """Module-level shortcut delegating to the shared StreamBuilder instance."""
    builder = stream_builder
    return builder.create_sse_events(*args, **kwargs)


def create_ai_chat_stream(*args, **kwargs) -> SSEResponse:
    """Module-level shortcut delegating to the shared StreamBuilder instance."""
    builder = stream_builder
    return builder.create_ai_chat_stream(*args, **kwargs)


def create_file_download_stream(*args, **kwargs) -> StreamResponse:
    """Module-level shortcut delegating to the shared StreamBuilder instance."""
    builder = stream_builder
    return builder.create_file_download_stream(*args, **kwargs)


def create_buffer_demo_stream(*args, **kwargs) -> StreamResponse:
    """Module-level shortcut delegating to the shared StreamBuilder instance."""
    builder = stream_builder
    return builder.create_buffer_demo_stream(*args, **kwargs)