#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
COS+CDN性能测试脚本
测试从用户上传到用户加载处理后图片的端到端时间
用于优化扩容策略
"""

import time
import json
import requests
import hashlib
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass, asdict
from pathlib import Path
import os
import sys

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

@dataclass
class PerformanceMetrics:
    """Timing and size metrics collected for one end-to-end test run."""
    upload_time: float        # seconds spent uploading the original image to COS
    processing_time: float    # seconds spent in (simulated) AI processing
    download_time: float      # seconds spent downloading the processed image
    total_time: float         # upload_time + processing_time + download_time
    file_size_original: int   # original file size in bytes
    file_size_processed: int  # processed file size in bytes
    compression_ratio: float  # file_size_processed / file_size_original
    timestamp: datetime       # when this test run finished
    user_tier: str            # user tier label ("A" / "B" / "C")
    region: str               # COS region the test targeted
    cdn_enabled: bool         # True if the download went through the CDN

@dataclass
class ScalingRecommendation:
    """A server scaling recommendation derived from performance analysis."""
    current_servers: int        # assumed current server count
    recommended_servers: int    # suggested server count after scaling
    reasoning: str              # human-readable justification
    performance_threshold: float  # the timing figure (seconds) that triggered this
    user_load: int              # number of test results backing the recommendation
    confidence: float           # heuristic confidence in [0, 1]

class COSCDNPerformanceTester:
    """COS+CDN性能测试器"""
    
    def __init__(self, config_path: str = "cos_cdn_config.json"):
        """初始化测试器"""
        self.config_path = config_path
        self.config = self._load_config()
        self.test_results = []
        
    def _load_config(self) -> Dict:
        """加载配置"""
        default_config = {
            "cos": {
                "bucket_name": "photoenhancei-bj-1259206939",
                "region": "ap-beijing",
                "secret_id": "",
                "secret_key": "",
                "domain": "photoenhancei-bj-1259206939.cos.ap-beijing.myqcloud.com"
            },
            "cdn": {
                "domain": "",
                "enabled": False
            },
            "test": {
                "test_images": [
                    "test_image_1.jpg",
                    "test_image_2.jpg", 
                    "test_image_3.jpg"
                ],
                "user_tiers": ["A", "B", "C"],
                "test_duration_minutes": 10,
                "concurrent_users": [1, 5, 10, 20]
            },
            "performance_thresholds": {
                "upload_time_max": 5.0,  # 秒
                "processing_time_max": 30.0,  # 秒
                "download_time_max": 3.0,  # 秒
                "total_time_max": 40.0  # 秒
            }
        }
        
        try:
            if os.path.exists(self.config_path):
                with open(self.config_path, 'r', encoding='utf-8') as f:
                    return json.load(f)
            else:
                # 创建默认配置文件
                with open(self.config_path, 'w', encoding='utf-8') as f:
                    json.dump(default_config, f, indent=2, ensure_ascii=False)
                logger.info(f"已创建默认配置文件: {self.config_path}")
                return default_config
        except Exception as e:
            logger.error(f"加载配置失败: {e}")
            return default_config
    
    def simulate_upload(self, image_path: str, user_tier: str) -> Tuple[float, int]:
        """模拟图片上传到COS"""
        try:
            start_time = time.time()
            
            # 模拟上传过程
            with open(image_path, 'rb') as f:
                file_data = f.read()
                file_size = len(file_data)
            
            # 这里应该调用实际的COS上传API
            # 目前使用模拟延迟
            upload_delay = self._calculate_upload_delay(file_size, user_tier)
            time.sleep(upload_delay)
            
            upload_time = time.time() - start_time
            
            logger.info(f"上传完成: {image_path}, 大小: {file_size} bytes, 耗时: {upload_time:.2f}s")
            return upload_time, file_size
            
        except Exception as e:
            logger.error(f"上传失败: {e}")
            return 0.0, 0
    
    def simulate_processing(self, image_path: str, user_tier: str) -> Tuple[float, int]:
        """模拟AI图片处理"""
        try:
            start_time = time.time()
            
            # 模拟AI处理过程
            processing_delay = self._calculate_processing_delay(user_tier)
            time.sleep(processing_delay)
            
            # 模拟处理后的文件大小（通常会更小）
            with open(image_path, 'rb') as f:
                original_size = len(f.read())
            
            # 模拟压缩效果
            compression_ratio = self._get_compression_ratio(user_tier)
            processed_size = int(original_size * compression_ratio)
            
            processing_time = time.time() - start_time
            
            logger.info(f"处理完成: {image_path}, 处理时间: {processing_time:.2f}s, 压缩比: {compression_ratio:.2f}")
            return processing_time, processed_size
            
        except Exception as e:
            logger.error(f"处理失败: {e}")
            return 0.0, 0
    
    def simulate_download(self, image_path: str, user_tier: str, cdn_enabled: bool = False) -> float:
        """模拟从COS/CDN下载图片"""
        try:
            start_time = time.time()
            
            # 模拟下载过程
            download_delay = self._calculate_download_delay(user_tier, cdn_enabled)
            time.sleep(download_delay)
            
            download_time = time.time() - start_time
            
            source = "CDN" if cdn_enabled else "COS"
            logger.info(f"下载完成: {image_path}, 来源: {source}, 耗时: {download_time:.2f}s")
            return download_time
            
        except Exception as e:
            logger.error(f"下载失败: {e}")
            return 0.0
    
    def _calculate_upload_delay(self, file_size: int, user_tier: str) -> float:
        """计算上传延迟"""
        # 基础延迟 + 文件大小影响 + 用户级别影响
        base_delay = 0.5
        size_factor = file_size / (1024 * 1024) * 0.1  # 每MB增加0.1秒
        tier_factor = {"A": 1.2, "B": 1.0, "C": 0.8}.get(user_tier, 1.0)
        
        return (base_delay + size_factor) * tier_factor
    
    def _calculate_processing_delay(self, user_tier: str) -> float:
        """计算处理延迟"""
        # 模拟不同用户级别的处理优先级
        base_processing_time = 25.0  # 基础处理时间25秒
        tier_multiplier = {"A": 1.5, "B": 1.0, "C": 0.7}.get(user_tier, 1.0)
        
        return base_processing_time * tier_multiplier
    
    def _calculate_download_delay(self, user_tier: str, cdn_enabled: bool) -> float:
        """计算下载延迟"""
        if cdn_enabled:
            # CDN下载更快
            base_delay = 0.3
            tier_factor = {"A": 1.1, "B": 1.0, "C": 0.9}.get(user_tier, 1.0)
        else:
            # 直接从COS下载
            base_delay = 1.0
            tier_factor = {"A": 1.3, "B": 1.0, "C": 0.8}.get(user_tier, 1.0)
        
        return base_delay * tier_factor
    
    def _get_compression_ratio(self, user_tier: str) -> float:
        """获取压缩比例"""
        # 不同用户级别使用不同的压缩策略
        ratios = {"A": 0.6, "B": 0.7, "C": 0.8}  # A级压缩更多
        return ratios.get(user_tier, 0.7)
    
    def run_single_test(self, image_path: str, user_tier: str, cdn_enabled: bool = False) -> PerformanceMetrics:
        """运行单次测试"""
        logger.info(f"开始测试: {image_path}, 用户级别: {user_tier}, CDN: {cdn_enabled}")
        
        # 1. 上传阶段
        upload_time, original_size = self.simulate_upload(image_path, user_tier)
        
        # 2. 处理阶段
        processing_time, processed_size = self.simulate_processing(image_path, user_tier)
        
        # 3. 下载阶段
        download_time = self.simulate_download(image_path, user_tier, cdn_enabled)
        
        # 4. 计算总时间和压缩比
        total_time = upload_time + processing_time + download_time
        compression_ratio = processed_size / original_size if original_size > 0 else 1.0
        
        metrics = PerformanceMetrics(
            upload_time=upload_time,
            processing_time=processing_time,
            download_time=download_time,
            total_time=total_time,
            file_size_original=original_size,
            file_size_processed=processed_size,
            compression_ratio=compression_ratio,
            timestamp=datetime.now(),
            user_tier=user_tier,
            region=self.config['cos']['region'],
            cdn_enabled=cdn_enabled
        )
        
        logger.info(f"测试完成: 总耗时 {total_time:.2f}s, 压缩比 {compression_ratio:.2f}")
        return metrics
    
    def run_performance_test_suite(self) -> List[PerformanceMetrics]:
        """运行完整的性能测试套件"""
        logger.info("开始运行性能测试套件")
        
        test_results = []
        user_tiers = self.config['test']['user_tiers']
        test_images = self.config['test']['test_images']
        
        # 创建测试图片（如果不存在）
        self._create_test_images()
        
        for user_tier in user_tiers:
            for image_name in test_images:
                image_path = f"test_images/{image_name}"
                
                if not os.path.exists(image_path):
                    logger.warning(f"测试图片不存在: {image_path}")
                    continue
                
                # 测试CDN关闭的情况
                metrics_no_cdn = self.run_single_test(image_path, user_tier, cdn_enabled=False)
                test_results.append(metrics_no_cdn)
                
                # 测试CDN开启的情况
                if self.config['cdn']['enabled']:
                    metrics_with_cdn = self.run_single_test(image_path, user_tier, cdn_enabled=True)
                    test_results.append(metrics_with_cdn)
        
        self.test_results = test_results
        logger.info(f"性能测试完成，共 {len(test_results)} 个测试结果")
        return test_results
    
    def _create_test_images(self):
        """创建测试图片"""
        test_dir = Path("test_images")
        test_dir.mkdir(exist_ok=True)
        
        # 创建一些简单的测试图片
        test_images = [
            "test_image_1.jpg",
            "test_image_2.jpg",
            "test_image_3.jpg"
        ]
        
        for image_name in test_images:
            image_path = test_dir / image_name
            if not image_path.exists():
                # 创建一个简单的测试文件
                with open(image_path, 'wb') as f:
                    # 写入一些模拟的图片数据
                    f.write(b'\xff\xd8\xff\xe0' + b'0' * 1024 * 100)  # 模拟100KB的JPEG
                logger.info(f"创建测试图片: {image_path}")
    
    def analyze_performance(self) -> Dict:
        """分析性能数据"""
        if not self.test_results:
            return {"error": "没有测试数据"}
        
        analysis = {
            "total_tests": len(self.test_results),
            "average_metrics": {},
            "tier_performance": {},
            "cdn_impact": {},
            "threshold_violations": [],
            "recommendations": []
        }
        
        # 计算平均指标
        total_upload = sum(m.upload_time for m in self.test_results)
        total_processing = sum(m.processing_time for m in self.test_results)
        total_download = sum(m.download_time for m in self.test_results)
        total_time = sum(m.total_time for m in self.test_results)
        
        analysis["average_metrics"] = {
            "upload_time": total_upload / len(self.test_results),
            "processing_time": total_processing / len(self.test_results),
            "download_time": total_download / len(self.test_results),
            "total_time": total_time / len(self.test_results)
        }
        
        # 按用户级别分析
        for tier in ["A", "B", "C"]:
            tier_results = [m for m in self.test_results if m.user_tier == tier]
            if tier_results:
                analysis["tier_performance"][tier] = {
                    "count": len(tier_results),
                    "avg_total_time": sum(m.total_time for m in tier_results) / len(tier_results),
                    "avg_processing_time": sum(m.processing_time for m in tier_results) / len(tier_results)
                }
        
        # CDN影响分析
        cdn_results = [m for m in self.test_results if m.cdn_enabled]
        no_cdn_results = [m for m in self.test_results if not m.cdn_enabled]
        
        if cdn_results and no_cdn_results:
            cdn_avg_download = sum(m.download_time for m in cdn_results) / len(cdn_results)
            no_cdn_avg_download = sum(m.download_time for m in no_cdn_results) / len(no_cdn_results)
            
            analysis["cdn_impact"] = {
                "download_speed_improvement": (no_cdn_avg_download - cdn_avg_download) / no_cdn_avg_download * 100,
                "cdn_avg_download": cdn_avg_download,
                "no_cdn_avg_download": no_cdn_avg_download
            }
        
        # 检查阈值违规
        thresholds = self.config['performance_thresholds']
        for metrics in self.test_results:
            violations = []
            if metrics.upload_time > thresholds['upload_time_max']:
                violations.append(f"上传时间超限: {metrics.upload_time:.2f}s > {thresholds['upload_time_max']}s")
            if metrics.processing_time > thresholds['processing_time_max']:
                violations.append(f"处理时间超限: {metrics.processing_time:.2f}s > {thresholds['processing_time_max']}s")
            if metrics.download_time > thresholds['download_time_max']:
                violations.append(f"下载时间超限: {metrics.download_time:.2f}s > {thresholds['download_time_max']}s")
            if metrics.total_time > thresholds['total_time_max']:
                violations.append(f"总时间超限: {metrics.total_time:.2f}s > {thresholds['total_time_max']}s")
            
            if violations:
                analysis["threshold_violations"].append({
                    "user_tier": metrics.user_tier,
                    "cdn_enabled": metrics.cdn_enabled,
                    "violations": violations
                })
        
        return analysis
    
    def generate_scaling_recommendations(self, analysis: Dict) -> List[ScalingRecommendation]:
        """生成扩容建议"""
        recommendations = []
        
        if "error" in analysis:
            return recommendations
        
        # 基于性能阈值违规生成建议
        violations = analysis.get("threshold_violations", [])
        if violations:
            # 计算需要增加的服务器数量
            violation_count = len(violations)
            recommended_servers = max(1, violation_count // 2)  # 每2个违规增加1个服务器
            
            recommendations.append(ScalingRecommendation(
                current_servers=1,  # 假设当前1个服务器
                recommended_servers=1 + recommended_servers,
                reasoning=f"检测到 {violation_count} 个性能阈值违规，建议增加 {recommended_servers} 个服务器",
                performance_threshold=analysis["average_metrics"]["total_time"],
                user_load=len(self.test_results),
                confidence=0.8
            ))
        
        # 基于用户级别性能差异生成建议
        tier_performance = analysis.get("tier_performance", {})
        if len(tier_performance) > 1:
            # 如果不同用户级别性能差异很大，建议按级别分配服务器
            max_time = max(tier["avg_total_time"] for tier in tier_performance.values())
            min_time = min(tier["avg_total_time"] for tier in tier_performance.values())
            
            if max_time / min_time > 1.5:  # 性能差异超过50%
                recommendations.append(ScalingRecommendation(
                    current_servers=1,
                    recommended_servers=3,  # 为每个用户级别分配服务器
                    reasoning=f"用户级别性能差异过大 ({max_time/min_time:.2f}倍)，建议按级别分配服务器",
                    performance_threshold=max_time,
                    user_load=sum(tier["count"] for tier in tier_performance.values()),
                    confidence=0.7
                ))
        
        return recommendations
    
    def save_results(self, output_path: str = "performance_test_results.json"):
        """保存测试结果"""
        results = {
            "test_config": self.config,
            "test_results": [asdict(metrics) for metrics in self.test_results],
            "analysis": self.analyze_performance(),
            "scaling_recommendations": [asdict(rec) for rec in self.generate_scaling_recommendations(self.analyze_performance())],
            "timestamp": datetime.now().isoformat()
        }
        
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False, default=str)
        
        logger.info(f"测试结果已保存到: {output_path}")

def main():
    """Entry point: run the suite, analyze it, and print a summary report."""
    print("🚀 COS+CDN性能测试系统")
    print("=" * 50)

    tester = COSCDNPerformanceTester()

    # Run the full test matrix (results are kept on the tester instance).
    print("开始运行性能测试...")
    tester.run_performance_test_suite()

    print("分析测试结果...")
    analysis = tester.analyze_performance()

    print("生成扩容建议...")
    recommendations = tester.generate_scaling_recommendations(analysis)

    # Persist everything before printing the console summary.
    tester.save_results()

    averages = analysis.get('average_metrics', {})
    print("\n📊 测试结果摘要:")
    print(f"总测试数: {analysis.get('total_tests', 0)}")
    print(f"平均总时间: {averages.get('total_time', 0):.2f}s")
    print(f"平均处理时间: {averages.get('processing_time', 0):.2f}s")
    print(f"平均下载时间: {averages.get('download_time', 0):.2f}s")

    cdn_impact = analysis.get('cdn_impact')
    if cdn_impact:
        print(f"CDN下载速度提升: {cdn_impact.get('download_speed_improvement', 0):.1f}%")

    print(f"\n⚠️ 性能阈值违规: {len(analysis.get('threshold_violations', []))} 个")

    print(f"\n🎯 扩容建议: {len(recommendations)} 个")
    for idx, rec in enumerate(recommendations, 1):
        print(f"  {idx}. {rec.reasoning}")
        print(f"     建议服务器数: {rec.recommended_servers}")
        print(f"     置信度: {rec.confidence:.2f}")

if __name__ == "__main__":
    main()
