import json
import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor

import matplotlib.pyplot as plt
import numpy as np
import requests
from tqdm import tqdm

# Performance test script - simulates load testing against API endpoints

class PerformanceTester:
    """HTTP GET load tester.

    Fires concurrent requests at a set of endpoints for a fixed duration,
    then prints per-endpoint statistics, saves latency histograms, and can
    emit a Markdown report with tuning recommendations.
    """

    # Thresholds used by generate_report's recommendations section.
    SLOW_THRESHOLD_S = 0.3     # average latency above this marks an endpoint "slow"
    HIGH_ERROR_RATE_PCT = 5    # error rate (%) above this marks an endpoint "high error"

    def __init__(self, base_url, endpoints, concurrent_users=100, test_duration=60):
        """
        :param base_url: URL prefix, e.g. "http://localhost:5000/api"
        :param endpoints: list of path strings appended to base_url
        :param concurrent_users: thread-pool size (max in-flight requests)
        :param test_duration: wall-clock seconds to keep issuing requests
        """
        self.base_url = base_url
        self.endpoints = endpoints
        self.concurrent_users = concurrent_users
        self.test_duration = test_duration
        # Per-endpoint latencies (seconds) of successful requests.
        self.results = {endpoint: [] for endpoint in endpoints}
        # Per-endpoint count of failed requests (non-200 status or network error).
        self.errors = {endpoint: 0 for endpoint in endpoints}
        # Protects self.errors, which worker threads update concurrently.
        self._lock = threading.Lock()

    def make_request(self, endpoint):
        """GET one endpoint and return elapsed seconds, or None on failure.

        A failure is a non-200 status or any requests-level error (connection
        refused, timeout, ...); each failure increments self.errors[endpoint].
        """
        start_time = time.time()
        try:
            response = requests.get(f"{self.base_url}{endpoint}", timeout=10)
        except requests.RequestException:
            # Narrowed from a bare `except Exception`: only network-level
            # failures are expected here; anything else should surface.
            with self._lock:
                self.errors[endpoint] += 1
            return None
        if response.status_code != 200:
            with self._lock:
                self.errors[endpoint] += 1
            return None
        return time.time() - start_time

    def run_test(self):
        """Drive the load test, then print/plot the results.

        Bug fix: the original called future.result() immediately after
        submit(), which blocked on every request in turn and ran the whole
        test serially — the thread pool was never actually exercised.
        """
        print(f"开始性能测试: {self.concurrent_users}个并发用户, 持续{self.test_duration}秒")

        end_time = time.time() + self.test_duration
        in_flight = []  # (endpoint, Future) pairs not yet collected

        with ThreadPoolExecutor(max_workers=self.concurrent_users) as executor:
            while time.time() < end_time:
                for endpoint in self.endpoints:
                    in_flight.append((endpoint, executor.submit(self.make_request, endpoint)))
                # Drain whatever has already finished so the list stays small.
                in_flight = self._collect(in_flight, wait_all=False)
                if len(in_flight) >= self.concurrent_users * 2:
                    # The pool is saturated; back off briefly so the task
                    # queue does not grow without bound on a slow server.
                    time.sleep(0.01)
            # Deadline reached: block until every outstanding request is done.
            self._collect(in_flight, wait_all=True)

        self.analyze_results()

    def _collect(self, in_flight, wait_all):
        """Move finished futures' latencies into self.results.

        Returns the (endpoint, future) pairs still running (always empty
        when wait_all is True).
        """
        still_running = []
        for endpoint, future in in_flight:
            if wait_all or future.done():
                elapsed = future.result()
                if elapsed is not None:
                    self.results[endpoint].append(elapsed)
            else:
                still_running.append((endpoint, future))
        return still_running

    def _endpoint_stats(self, endpoint):
        """Summary statistics for one endpoint, or None if it has no successes.

        Centralizes the math that the original duplicated in three places
        (console output, report table, report conclusions).
        """
        response_times = self.results[endpoint]
        if not response_times:
            return None
        total = len(response_times) + self.errors[endpoint]
        return {
            "total": total,
            "successes": len(response_times),
            "errors": self.errors[endpoint],
            "error_rate": self.errors[endpoint] / total * 100,
            "avg": sum(response_times) / len(response_times),
            "max": max(response_times),
            "min": min(response_times),
            "p95": np.percentile(response_times, 95),
            "rps": len(response_times) / self.test_duration,
        }

    def analyze_results(self):
        """Print per-endpoint statistics and save one latency histogram each."""
        print("\n性能测试结果:")
        print("-" * 50)

        for endpoint in self.endpoints:
            stats = self._endpoint_stats(endpoint)
            if stats is None:
                print(f"端点 {endpoint}: 无有效结果")
                continue

            print(f"端点: {endpoint}")
            print(f"  总请求数: {stats['total']}")
            print(f"  成功请求数: {stats['successes']}")
            print(f"  错误数: {stats['errors']} (错误率: {stats['error_rate']:.2f}%)")
            print(f"  平均响应时间: {stats['avg']*1000:.2f}ms")
            print(f"  最大响应时间: {stats['max']*1000:.2f}ms")
            print(f"  最小响应时间: {stats['min']*1000:.2f}ms")
            print(f"  95%响应时间: {stats['p95']*1000:.2f}ms")
            print(f"  每秒请求数(RPS): {stats['rps']:.2f}")
            print("-" * 50)

            # Response-time distribution histogram for this endpoint.
            plt.figure(figsize=(10, 6))
            plt.hist(self.results[endpoint], bins=20, alpha=0.7)
            plt.title(f'响应时间分布 - {endpoint}')
            plt.xlabel('响应时间 (秒)')
            plt.ylabel('请求数')
            plt.grid(True, alpha=0.3)

            # Save the chart; "/" in paths would create subdirectories.
            os.makedirs('performance_results', exist_ok=True)
            plt.savefig(f'performance_results/{endpoint.replace("/", "_")}_response_time.png')
            plt.close()

    def generate_report(self, filename="performance_test_report.md"):
        """Write a Markdown report of the test to *filename*.

        Includes a summary table, per-endpoint detail sections (linking the
        histograms saved by analyze_results), and recommendations based on
        SLOW_THRESHOLD_S / HIGH_ERROR_RATE_PCT.
        """
        # utf-8 is forced so the Chinese text survives any platform locale.
        with open(filename, 'w', encoding='utf-8') as f:
            f.write("# API性能测试报告\n\n")
            f.write(f"- 并发用户数: {self.concurrent_users}\n")
            f.write(f"- 测试持续时间: {self.test_duration}秒\n")
            f.write(f"- 基础URL: {self.base_url}\n\n")

            f.write("## 测试结果摘要\n\n")
            f.write("| 端点 | 总请求数 | 成功率 | 平均响应时间 | 95%响应时间 | 每秒请求数 |\n")
            f.write("|------|----------|--------|--------------|-------------|------------|\n")

            for endpoint in self.endpoints:
                stats = self._endpoint_stats(endpoint)
                if stats is None:
                    continue
                success_rate = stats['successes'] / stats['total'] * 100
                f.write(
                    f"| {endpoint} | {stats['total']} | {success_rate:.2f}% | "
                    f"{stats['avg']*1000:.2f}ms | {stats['p95']*1000:.2f}ms | {stats['rps']:.2f} |\n"
                )

            f.write("\n## 详细结果\n\n")

            for endpoint in self.endpoints:
                stats = self._endpoint_stats(endpoint)
                f.write(f"### 端点: {endpoint}\n\n")
                if stats is None:
                    f.write("无有效结果\n\n")
                    continue

                f.write(f"- 总请求数: {stats['total']}\n")
                f.write(f"- 成功请求数: {stats['successes']}\n")
                f.write(f"- 错误数: {stats['errors']} (错误率: {stats['error_rate']:.2f}%)\n")
                f.write(f"- 平均响应时间: {stats['avg']*1000:.2f}ms\n")
                f.write(f"- 最大响应时间: {stats['max']*1000:.2f}ms\n")
                f.write(f"- 最小响应时间: {stats['min']*1000:.2f}ms\n")
                f.write(f"- 95%响应时间: {stats['p95']*1000:.2f}ms\n")
                f.write(f"- 每秒请求数(RPS): {stats['rps']:.2f}\n\n")

                f.write(f"![响应时间分布](performance_results/{endpoint.replace('/', '_')}_response_time.png)\n\n")

            f.write("## 结论与建议\n\n")

            # Classify endpoints for the recommendations section.
            slow_endpoints = []        # (endpoint, avg latency in seconds)
            high_error_endpoints = []  # (endpoint, error rate in %)

            for endpoint in self.endpoints:
                stats = self._endpoint_stats(endpoint)
                if stats is None:
                    continue
                if stats['avg'] > self.SLOW_THRESHOLD_S:
                    slow_endpoints.append((endpoint, stats['avg']))
                if stats['error_rate'] > self.HIGH_ERROR_RATE_PCT:
                    high_error_endpoints.append((endpoint, stats['error_rate']))

            if slow_endpoints:
                f.write("### 需要优化的慢接口:\n\n")
                # Loop variable renamed from `time`, which shadowed the module.
                for endpoint, avg_s in slow_endpoints:
                    f.write(f"- {endpoint}: 平均响应时间 {avg_s*1000:.2f}ms\n")
                f.write("\n建议:\n- 检查数据库查询优化\n- 考虑添加缓存\n- 检查是否有不必要的计算\n\n")

            if high_error_endpoints:
                f.write("### 错误率高的接口:\n\n")
                for endpoint, rate in high_error_endpoints:
                    f.write(f"- {endpoint}: 错误率 {rate:.2f}%\n")
                f.write("\n建议:\n- 检查错误日志\n- 增加错误处理机制\n- 考虑添加重试机制\n\n")

            if not slow_endpoints and not high_error_endpoints:
                f.write("所有测试的API端点性能良好，错误率在可接受范围内。系统可以支持当前的并发用户数。\n\n")

            f.write("### 整体建议\n\n")
            f.write("1. 持续监控系统性能\n")
            f.write("2. 定期进行性能测试\n")
            f.write("3. 实施自动扩展机制以应对流量峰值\n")
            f.write("4. 优化数据库查询和索引\n")
            f.write("5. 考虑实施CDN加速静态资源\n")

        # Bug fix: the original printed a literal "(unknown)" placeholder here.
        print(f"性能测试报告已生成: {filename}")

# Example usage
if __name__ == "__main__":
    # Mock endpoints; point these at a real API for an actual test run.
    target_base = "http://localhost:5000/api"
    target_paths = [
        "/user/profile",
        "/bazi/analysis",
        "/character/analysis",
        "/fengshui/analysis",
        "/tools/calendar",
    ]

    # 100 concurrent users for a 60-second run, then a Markdown report.
    load_tester = PerformanceTester(
        target_base,
        target_paths,
        concurrent_users=100,
        test_duration=60,
    )
    load_tester.run_test()
    load_tester.generate_report()
