#!/usr/bin/env lua
-- Lua Battle System - 系统性能和安全审计器
-- 提供全面的系统性能分析、安全漏洞检测和架构质量评估
-- 设置Lua路径
package.path = package.path .. ';src/?.lua;src/utils/?.lua;src/config/?.lua;tests/?.lua;tests/unit/?.lua'

local Logger = require("utils.logger")
local TableUtils = require("utils.table_utils")

local SystemAuditor = {
    -- Auditor configuration; any entry may be overridden via init(config).
    config = {
        -- Performance thresholds consulted by the benchmark suites.
        performance_benchmarks = {
            max_function_execution_time = 100,  -- milliseconds
            max_memory_usage = 50 * 1024 * 1024,  -- 50MB, in bytes
            max_ai_decision_time = 50,  -- milliseconds
            max_battle_setup_time = 200,  -- milliseconds
            min_battles_per_second = 10
        },

        -- Toggles for the individual checks run by audit_security().
        -- NOTE(review): check_memory_leaks and check_error_disclosure have no
        -- corresponding branch in the visible audit_security() — confirm they
        -- are consumed elsewhere or wire them up.
        security_rules = {
            check_code_injection = true,
            check_eval_usage = true,
            check_file_access = true,
            check_memory_leaks = true,
            check_input_validation = true,
            check_error_disclosure = true
        },

        -- Architecture quality standards.
        -- NOTE(review): only referenced for logging in the visible code.
        architecture_standards = {
            max_dependency_depth = 5,
            require_modular_design = true,
            require_interface_segregation = true,
            require_single_responsibility = true,
            min_test_coverage = 0.7
        }
    },

    -- Mutable audit state; current_audit is populated by init().
    state = {
        current_audit = nil,
        performance_metrics = {},
        security_issues = {},
        architecture_analysis = {},
        benchmark_results = {}
    }
}

-- 初始化系统审计器
-- Initialize the auditor: optionally merge a caller-supplied config into
-- the defaults and reset the per-audit counters.
-- @tparam[opt] table config overrides merged into self.config
function SystemAuditor:init(config)
    Logger.info("初始化系统审计器", {
        version = "1.0",
        scope = "系统性能和安全全面审计"
    })

    if config then
        TableUtils.merge(self.config, config)
    end

    local audit_session = {
        start_time = os.time(),
        performance_tests_run = 0,
        security_checks_run = 0,
        architecture_components_analyzed = 0
    }
    self.state.current_audit = audit_session

    Logger.info("系统审计器初始化完成", {
        performance_benchmarks = self.config.performance_benchmarks,
        security_rules = self.config.security_rules,
        architecture_standards = self.config.architecture_standards
    })
end

-- 执行全面的系统审计
-- Run the full audit pipeline: performance, security and architecture
-- audits, weighted scoring (40%/30%/30%), issue tallying, and overall
-- recommendations. Requires init() to have been called first.
-- @treturn table { summary, performance, security, architecture, overall_recommendations }
function SystemAuditor:perform_comprehensive_audit()
    if not self.state.current_audit then
        error("审计器未初始化，请先调用init()")
    end

    Logger.info("开始全面系统审计")

    local audit_results = {
        summary = {
            audit_timestamp = os.date("%Y-%m-%d %H:%M:%S"),
            performance_score = 0,
            security_score = 0,
            architecture_score = 0,
            overall_score = 0,
            total_issues = 0,
            critical_issues = 0
        },
        performance = {
            benchmarks = {},
            bottlenecks = {},
            recommendations = {}
        },
        security = {
            vulnerabilities = {},
            risk_assessment = {},
            recommendations = {}
        },
        architecture = {
            module_analysis = {},
            dependency_analysis = {},
            design_patterns = {},
            recommendations = {}
        },
        overall_recommendations = {}
    }

    -- Performance audit
    Logger.info("开始性能审计")
    audit_results.performance = self:audit_performance()
    self.state.current_audit.performance_tests_run = #audit_results.performance.benchmarks

    -- Security audit
    Logger.info("开始安全审计")
    audit_results.security = self:audit_security()
    self.state.current_audit.security_checks_run = #audit_results.security.vulnerabilities

    -- Architecture audit
    Logger.info("开始架构审计")
    audit_results.architecture = self:audit_architecture()
    self.state.current_audit.architecture_components_analyzed = #audit_results.architecture.module_analysis

    -- Per-area scores
    audit_results.summary.performance_score = self:calculate_performance_score(audit_results.performance)
    audit_results.summary.security_score = self:calculate_security_score(audit_results.security)
    audit_results.summary.architecture_score = self:calculate_architecture_score(audit_results.architecture)

    -- Weighted overall score: performance 40%, security 30%, architecture 30%
    audit_results.summary.overall_score = (
        audit_results.summary.performance_score * 0.4 +
        audit_results.summary.security_score * 0.3 +
        audit_results.summary.architecture_score * 0.3
    )

    -- Tally issues.
    -- Fix: the previous version counted every security check result (even
    -- clean ones with found == false) and the NUMBER of analyzed modules
    -- (rather than those modules' issues) as "issues", inflating the total.
    -- Count only confirmed findings.
    local confirmed_vulns = 0
    for _, vuln in ipairs(audit_results.security.vulnerabilities) do
        if vuln.found then
            confirmed_vulns = confirmed_vulns + 1
        end
    end
    local architecture_issues = 0
    for _, module_info in ipairs(audit_results.architecture.module_analysis) do
        architecture_issues = architecture_issues + #module_info.issues
    end
    audit_results.summary.total_issues =
        #audit_results.performance.bottlenecks +
        confirmed_vulns +
        architecture_issues

    -- Tally critical issues (security findings must be confirmed).
    for _, bottleneck in ipairs(audit_results.performance.bottlenecks) do
        if bottleneck.severity == "critical" then
            audit_results.summary.critical_issues = audit_results.summary.critical_issues + 1
        end
    end
    for _, vuln in ipairs(audit_results.security.vulnerabilities) do
        if vuln.found and (vuln.severity == "critical" or vuln.severity == "high") then
            audit_results.summary.critical_issues = audit_results.summary.critical_issues + 1
        end
    end

    -- Overall recommendations
    audit_results.overall_recommendations = self:generate_overall_recommendations(audit_results)

    Logger.info("全面系统审计完成", {
        performance_score = audit_results.summary.performance_score,
        security_score = audit_results.summary.security_score,
        architecture_score = audit_results.summary.architecture_score,
        overall_score = audit_results.summary.overall_score,
        total_issues = audit_results.summary.total_issues
    })

    return audit_results
end

-- 性能审计
-- Run the performance audit: execute each benchmark suite in order, then
-- derive bottlenecks and optimization recommendations from the results.
-- @treturn table { benchmarks, bottlenecks, recommendations }
function SystemAuditor:audit_performance()
    local results = {
        benchmarks = {},
        bottlenecks = {},
        recommendations = {}
    }

    -- Each closure produces one benchmark report, appended in order.
    local suites = {
        function() return self:benchmark_ai_performance() end,
        function() return self:benchmark_battle_performance() end,
        function() return self:analyze_memory_usage() end,
        function() return self:test_system_responsiveness() end
    }
    for _, run_suite in ipairs(suites) do
        results.benchmarks[#results.benchmarks + 1] = run_suite()
    end

    -- Distill bottlenecks and advice from the collected reports.
    results.bottlenecks = self:identify_performance_bottlenecks(results.benchmarks)
    results.recommendations = self:generate_performance_recommendations(results)

    return results
end

-- AI性能基准测试
-- Benchmark AI decision latency across three scenarios of increasing
-- complexity; each scenario runs ten simulated decisions and records
-- average/min/max times plus pass/fail against its expected ceiling.
-- @treturn table benchmark report with tests, aggregate stats and issues
function SystemAuditor:benchmark_ai_performance()
    Logger.info("执行AI性能基准测试")

    local report = {
        name = "AI决策性能测试",
        start_time = os.clock(),
        tests = {},
        average_decision_time = 0,
        max_decision_time = 0,
        min_decision_time = math.huge,
        issues = {}
    }

    local scenarios = {
        { name = "简单决策场景", complexity = "low", expected_max_time = 20 },
        { name = "中等决策场景", complexity = "medium", expected_max_time = 50 },
        { name = "复杂决策场景", complexity = "high", expected_max_time = 100 }
    }

    for _, scenario in ipairs(scenarios) do
        -- Run the simulated decision ten times, accumulating stats inline.
        local runs = 10
        local sum_ms, worst_ms, best_ms = 0, 0, math.huge
        for _ = 1, runs do
            local t0 = os.clock()
            self:simulate_ai_decision(scenario.complexity)
            local elapsed_ms = (os.clock() - t0) * 1000  -- seconds -> ms
            sum_ms = sum_ms + elapsed_ms
            worst_ms = math.max(worst_ms, elapsed_ms)
            best_ms = math.min(best_ms, elapsed_ms)
        end
        local mean_ms = sum_ms / runs

        local result = {
            scenario = scenario.name,
            complexity = scenario.complexity,
            average_time = mean_ms,
            max_time = worst_ms,
            min_time = best_ms,
            expected_max_time = scenario.expected_max_time,
            passes = worst_ms <= scenario.expected_max_time
        }
        report.tests[#report.tests + 1] = result

        -- Fold scenario stats into the report-level aggregates.
        report.average_decision_time = report.average_decision_time + mean_ms
        report.max_decision_time = math.max(report.max_decision_time, worst_ms)
        report.min_decision_time = math.min(report.min_decision_time, best_ms)

        -- Flag overruns; more than 2x over budget escalates to critical.
        if not result.passes then
            report.issues[#report.issues + 1] = {
                type = "performance_violation",
                scenario = scenario.name,
                message = string.format("决策时间超时: %.2fms (期望: < %.2fms)",
                    worst_ms, scenario.expected_max_time),
                severity = worst_ms > scenario.expected_max_time * 2 and "critical" or "warning"
            }
        end
    end

    report.average_decision_time = report.average_decision_time / #report.tests
    report.end_time = os.clock()
    report.total_time = (report.end_time - report.start_time) * 1000

    return report
end

-- 战斗系统性能测试
-- Benchmark simulated battle time for increasingly large rosters (five
-- runs each) and derive the sustained battles-per-second figure from the
-- overall average battle time.
-- @treturn table benchmark report with tests, aggregates and issues
function SystemAuditor:benchmark_battle_performance()
    Logger.info("执行战斗系统性能测试")

    local report = {
        name = "战斗系统性能测试",
        start_time = os.clock(),
        tests = {},
        average_battle_time = 0,
        battles_per_second = 0,
        issues = {}
    }

    local scenarios = {
        { name = "1v1战斗", participants = 2, expected_max_time = 50 },
        { name = "3v3战斗", participants = 6, expected_max_time = 100 },
        { name = "5v5战斗", participants = 10, expected_max_time = 200 }
    }

    for _, scenario in ipairs(scenarios) do
        -- Average five simulated battles at this roster size.
        local runs = 5
        local sum_ms = 0
        for _ = 1, runs do
            local t0 = os.clock()
            self:simulate_battle(scenario.participants)
            sum_ms = sum_ms + (os.clock() - t0) * 1000  -- seconds -> ms
        end
        local mean_ms = sum_ms / runs

        local result = {
            scenario = scenario.name,
            participants = scenario.participants,
            average_time = mean_ms,
            expected_max_time = scenario.expected_max_time,
            passes = mean_ms <= scenario.expected_max_time
        }
        report.tests[#report.tests + 1] = result
        report.average_battle_time = report.average_battle_time + mean_ms

        if not result.passes then
            report.issues[#report.issues + 1] = {
                type = "performance_violation",
                scenario = scenario.name,
                message = string.format("战斗时间超时: %.2fms (期望: < %.2fms)",
                    mean_ms, scenario.expected_max_time),
                severity = "warning"
            }
        end
    end

    report.average_battle_time = report.average_battle_time / #report.tests
    -- Throughput implied by the average battle duration (ms -> battles/s).
    report.battles_per_second = 1000 / report.average_battle_time
    report.end_time = os.clock()
    report.total_time = (report.end_time - report.start_time) * 1000

    return report
end

-- 内存使用分析
-- Memory usage analysis: measures per-module load cost via
-- collectgarbage("count") deltas, then probes for leaks by running many
-- short-lived allocations and checking growth that survives a full GC.
-- @treturn table { name, memory_usage, leaks_detected, issues, timings }
function SystemAuditor:analyze_memory_usage()
    Logger.info("分析内存使用情况")

    local analysis = {
        name = "内存使用分析",
        start_time = os.clock(),
        memory_usage = {},
        leaks_detected = {},
        issues = {}
    }

    -- Simulated load tests; expected_max_memory is in bytes.
    local memory_tests = {
        { name = "基础系统初始化", module = "main", expected_max_memory = 10 * 1024 * 1024 },
        { name = "AI系统加载", module = "ai", expected_max_memory = 20 * 1024 * 1024 },
        { name = "战斗系统", module = "battle", expected_max_memory = 15 * 1024 * 1024 },
        { name = "完整系统", module = "full_system", expected_max_memory = 50 * 1024 * 1024 }
    }

    for _, test in ipairs(memory_tests) do
        -- collectgarbage("count") reports KB; * 1024 converts to bytes.
        local memory_before = collectgarbage("count") * 1024

        -- Simulate loading the module under test.
        self:simulate_module_load(test.module)

        local memory_after = collectgarbage("count") * 1024
        local memory_used = memory_after - memory_before

        local result = {
            test_name = test.name,
            module = test.module,
            memory_used = memory_used,
            expected_max_memory = test.expected_max_memory,
            passes = memory_used <= test.expected_max_memory
        }

        table.insert(analysis.memory_usage, result)

        if not result.passes then
            table.insert(analysis.issues, {
                type = "memory_violation",
                test = test.name,
                message = string.format("内存使用超限: %.2fMB (期望: < %.2fMB)",
                    memory_used / (1024 * 1024), test.expected_max_memory / (1024 * 1024)),
                -- 50% over budget escalates the finding to critical.
                severity = memory_used > test.expected_max_memory * 1.5 and "critical" or "warning"
            })
        end
    end

    -- Leak probe: run 100 allocation bursts, force a full GC, compare.
    local initial_memory = collectgarbage("count")
    for i = 1, 100 do
        self:simulate_memory_operation()
    end
    collectgarbage("collect")
    local final_memory = collectgarbage("count")

    -- memory_growth is in KB (collectgarbage("count") units), so this
    -- threshold is ~1000KB (~1MB) of post-GC growth — the original
    -- "1KB" note here was wrong.
    local memory_growth = final_memory - initial_memory
    if memory_growth > 1000 then  -- > 1000 KB surviving a full GC cycle
        table.insert(analysis.leaks_detected, {
            memory_growth = memory_growth * 1024,  -- stored in bytes
            message = string.format("检测到潜在内存泄漏: %.2fKB", memory_growth)
        })
    end

    analysis.end_time = os.clock()
    analysis.total_time = (analysis.end_time - analysis.start_time) * 1000

    return analysis
end

-- 系统响应性测试
-- Measure per-operation response times (20 samples each) and flag any
-- operation whose worst case exceeds the 100ms responsiveness threshold
-- (over 500ms escalates to critical).
-- @treturn table responsiveness report with per-operation stats and issues
function SystemAuditor:test_system_responsiveness()
    Logger.info("测试系统响应性")

    local report = {
        name = "系统响应性测试",
        start_time = os.clock(),
        response_times = {},
        average_response_time = 0,
        max_response_time = 0,
        issues = {}
    }

    local operations = {
        "配置加载",
        "AI决策",
        "技能计算",
        "状态更新",
        "事件处理"
    }

    for _, op in ipairs(operations) do
        -- Sample the operation 20 times, tracking sum and worst case.
        local samples = 20
        local sum_ms, worst_ms = 0, 0
        for _ = 1, samples do
            local t0 = os.clock()
            self:simulate_operation(op)
            local elapsed_ms = (os.clock() - t0) * 1000  -- seconds -> ms
            sum_ms = sum_ms + elapsed_ms
            worst_ms = math.max(worst_ms, elapsed_ms)
        end
        local mean_ms = sum_ms / samples

        report.response_times[#report.response_times + 1] = {
            operation = op,
            average_time = mean_ms,
            max_time = worst_ms
        }
        report.max_response_time = math.max(report.max_response_time, worst_ms)

        if worst_ms > 100 then  -- 100ms responsiveness threshold
            report.issues[#report.issues + 1] = {
                type = "responsiveness_issue",
                operation = op,
                message = string.format("操作响应时间过长: %.2fms", worst_ms),
                severity = worst_ms > 500 and "critical" or "warning"
            }
        end
    end

    -- Overall average response time across all operations.
    local total = 0
    for _, entry in ipairs(report.response_times) do
        total = total + entry.average_time
    end
    report.average_response_time = total / #report.response_times

    report.end_time = os.clock()
    report.total_time = (report.end_time - report.start_time) * 1000

    return report
end

-- 安全审计
-- Run the security audit: execute each check whose toggle is enabled in
-- config.security_rules, then derive a risk assessment and advice.
-- @treturn table { vulnerabilities, risk_assessment, recommendations }
function SystemAuditor:audit_security()
    local results = {
        vulnerabilities = {},
        risk_assessment = {},
        recommendations = {}
    }

    -- Ordered list of toggleable checks; each enabled check contributes
    -- exactly one finding record.
    local checks = {
        { flag = "check_code_injection", run = function() return self:check_code_injection() end },
        { flag = "check_eval_usage", run = function() return self:check_eval_usage() end },
        { flag = "check_file_access", run = function() return self:check_file_access_security() end },
        { flag = "check_input_validation", run = function() return self:check_input_validation() end }
    }
    for _, check in ipairs(checks) do
        if self.config.security_rules[check.flag] then
            results.vulnerabilities[#results.vulnerabilities + 1] = check.run()
        end
    end

    -- Summarize risk and produce remediation advice.
    results.risk_assessment = self:assess_security_risks(results.vulnerabilities)
    results.recommendations = self:generate_security_recommendations(results)

    return results
end

-- 架构审计
-- Run the architecture audit: module quality, dependency graph, design
-- pattern usage, and the recommendations derived from all three.
-- @treturn table { module_analysis, dependency_analysis, design_patterns, recommendations }
function SystemAuditor:audit_architecture()
    -- Table-constructor fields evaluate in order, preserving the original
    -- call sequence: modules, dependencies, then design patterns.
    local results = {
        module_analysis = self:analyze_modules(),
        dependency_analysis = self:analyze_dependencies(),
        design_patterns = self:check_design_patterns(),
        recommendations = {}
    }
    results.recommendations = self:generate_architecture_recommendations(results)
    return results
end

-- 辅助模拟函数
-- Simulate an AI decision whose duration scales with complexity.
-- Fix: replaced `os.execute("sleep " .. delay)` with a busy-wait on
-- os.clock(). The benchmarks time this call with os.clock() (CPU time of
-- THIS process), which a child-process sleep never advances — the old
-- code made every measurement near zero. Fractional `sleep` arguments are
-- also not portable across shells.
-- @tparam string complexity "low" | "medium"; anything else is treated as high
function SystemAuditor:simulate_ai_decision(complexity)
    -- Delay ranges in milliseconds per complexity tier.
    local ranges = {
        low = { 5, 15 },      -- simple decision: 5-15ms
        medium = { 20, 40 }   -- medium decision: 20-40ms
    }
    local range = ranges[complexity] or { 50, 80 }  -- complex decision: 50-80ms
    local delay = math.random(range[1], range[2]) / 1000  -- seconds

    -- Busy-wait so the elapsed time is visible to os.clock() measurements.
    local deadline = os.clock() + delay
    while os.clock() < deadline do end
end

-- Simulate a battle whose duration scales with the roster size.
-- Fix: replaced `os.execute("sleep " .. delay)` with a busy-wait on
-- os.clock() — the benchmark measures CPU time via os.clock(), which a
-- child-process sleep never advances (and fractional `sleep` is not
-- portable).
-- @tparam number participants total combatants; base cost is 10ms each
function SystemAuditor:simulate_battle(participants)
    local base_ms = participants * 10  -- 10ms of work per participant
    local delay = math.random(base_ms, base_ms * 2) / 1000  -- seconds
    local deadline = os.clock() + delay
    while os.clock() < deadline do end
end

-- Simulate the transient allocations of loading a module: build (and
-- immediately discard) 1000 small records so the memory analysis has a
-- measurable collectgarbage("count") delta.
-- @tparam string module module name stamped into each record's metadata
function SystemAuditor:simulate_module_load(module)
    local scratch = {}
    for index = 1, 1000 do
        scratch[index] = {
            id = index,
            data = "test_data_" .. index,
            metadata = {
                created = os.time(),
                module = module
            }
        }
    end
end

-- Allocate a short-lived batch of strings; the leak probe calls this
-- repeatedly and checks that a full GC reclaims the garbage.
function SystemAuditor:simulate_memory_operation()
    local scratch = {}
    for slot = 1, 10 do
        scratch[slot] = string.rep("x", 100)
    end
end

-- Simulate the latency of a named system operation.
-- Fix: replaced `os.execute("sleep " .. delay)` with a busy-wait on
-- os.clock(): the responsiveness test times this call with os.clock()
-- (CPU time), which a child-process sleep never advances, and fractional
-- `sleep` arguments are not portable.
-- @tparam string operation one of the keys below; unknown names get 20ms
function SystemAuditor:simulate_operation(operation)
    -- Nominal processing time (milliseconds) per operation type.
    local base_time = {
        ["配置加载"] = 10,
        ["AI决策"] = 30,
        ["技能计算"] = 20,
        ["状态更新"] = 5,
        ["事件处理"] = 15
    }

    local delay = (base_time[operation] or 20) / 1000  -- seconds
    local deadline = os.clock() + delay
    while os.clock() < deadline do end
end

-- 安全检查函数
-- Code-injection check (stub): currently always reports a clean result.
-- @treturn table finding record { type, severity, found, message, recommendation }
function SystemAuditor:check_code_injection()
    local finding = {
        type = "code_injection",
        severity = "low",
        found = false,
        message = "未发现明显的代码注入风险",
        recommendation = "继续监控用户输入处理"
    }
    return finding
end

-- Dynamic-code-evaluation check (stub): currently always reports clean.
-- @treturn table finding record { type, severity, found, message, recommendation }
function SystemAuditor:check_eval_usage()
    local finding = {
        type = "eval_usage",
        severity = "info",
        found = false,
        message = "未发现eval()函数使用",
        recommendation = "保持良好实践"
    }
    return finding
end

-- File-access security check (stub): flags that file I/O exists and
-- should be permission-checked.
-- @treturn table finding record { type, severity, found, message, recommendation }
function SystemAuditor:check_file_access_security()
    local finding = {
        type = "file_access",
        severity = "low",
        found = true,
        message = "发现文件访问操作",
        recommendation = "确保所有文件访问都有适当的权限检查"
    }
    return finding
end

-- Input-validation check (stub): flags that input handling needs
-- stricter validation.
-- @treturn table finding record { type, severity, found, message, recommendation }
function SystemAuditor:check_input_validation()
    local finding = {
        type = "input_validation",
        severity = "medium",
        found = true,
        message = "发现输入处理，需要加强验证",
        recommendation = "实施严格的输入验证和清理"
    }
    return finding
end

-- 架构分析函数
-- Per-module quality assessment (stub data): cohesion, coupling and size
-- scores (0-100) plus known issues for each major module.
-- @treturn table array of module analysis records
function SystemAuditor:analyze_modules()
    local ai_module = {
        name = "AI模块",
        cohesion_score = 85,
        coupling_score = 70,
        size_score = 80,
        issues = {
            "部分函数职责过重",
            "可以进一步模块化"
        }
    }
    local battle_module = {
        name = "战斗模块",
        cohesion_score = 90,
        coupling_score = 75,
        size_score = 85,
        issues = {}
    }
    local utils_module = {
        name = "工具模块",
        cohesion_score = 95,
        coupling_score = 60,
        size_score = 90,
        issues = {}
    }
    return { ai_module, battle_module, utils_module }
end

-- Dependency-graph summary (stub data).
-- @treturn table { max_depth, circular_dependencies, total_dependencies, issues }
function SystemAuditor:analyze_dependencies()
    local summary = {
        max_depth = 3,
        circular_dependencies = 0,
        total_dependencies = 15,
        issues = {}
    }
    return summary
end

-- Inventory of design patterns observed in the codebase (stub data).
-- @treturn table { patterns_found, missing_patterns }
function SystemAuditor:check_design_patterns()
    local found = {
        "单例模式 (Logger)",
        "工厂模式 (Character创建)",
        "观察者模式 (EventSystem)",
        "策略模式 (AI决策)"
    }
    local missing = {
        "可以考虑使用装饰器模式优化技能系统"
    }
    return { patterns_found = found, missing_patterns = missing }
end

-- 评分计算函数
-- Score performance out of 100, deducting a fixed penalty per benchmark
-- issue (critical: -25, warning: -10); floors at zero.
-- @tparam table performance audit result containing a benchmarks list
-- @treturn number score in [0, 100]
function SystemAuditor:calculate_performance_score(performance)
    local penalty = { critical = 25, warning = 10 }
    local score = 100

    for _, benchmark in ipairs(performance.benchmarks) do
        for _, issue in ipairs(benchmark.issues or {}) do
            score = score - (penalty[issue.severity] or 0)
        end
    end

    return math.max(0, score)
end

-- Score security out of 100, deducting per CONFIRMED finding by severity
-- (critical: -30, high: -20, medium: -10, anything else: -5); floors at zero.
-- @tparam table security audit result containing a vulnerabilities list
-- @treturn number score in [0, 100]
function SystemAuditor:calculate_security_score(security)
    local penalty = { critical = 30, high = 20, medium = 10 }
    local score = 100

    for _, vulnerability in ipairs(security.vulnerabilities) do
        if vulnerability.found then
            score = score - (penalty[vulnerability.severity] or 5)
        end
    end

    return math.max(0, score)
end

-- Score architecture out of 100: -10 for low cohesion (< 80), -10 for
-- high coupling (> 80), and -5 per recorded module issue; floors at zero.
-- @tparam table architecture audit result containing module_analysis
-- @treturn number score in [0, 100]
function SystemAuditor:calculate_architecture_score(architecture)
    local score = 100

    for _, module_info in ipairs(architecture.module_analysis) do
        if module_info.cohesion_score < 80 then
            score = score - 10
        end
        if module_info.coupling_score > 80 then
            score = score - 10
        end
        score = score - 5 * #module_info.issues
    end

    return math.max(0, score)
end

-- 生成建议函数
-- Produce performance recommendations (static list in the current
-- implementation; the argument is accepted for interface symmetry).
-- @tparam table performance performance audit results (currently unused)
-- @treturn table array of { priority, category, message, action }
function SystemAuditor:generate_performance_recommendations(performance)
    local recommendations = {}
    recommendations[1] = {
        priority = "high",
        category = "optimization",
        message = "优化AI决策算法，提高响应速度",
        action = "考虑使用缓存机制或更高效的算法"
    }
    recommendations[2] = {
        priority = "medium",
        category = "monitoring",
        message = "建立性能监控体系",
        action = "实施实时性能监控和告警机制"
    }
    return recommendations
end

-- Produce security recommendations (static list in the current
-- implementation; the argument is accepted for interface symmetry).
-- @tparam table security security audit results (currently unused)
-- @treturn table array of { priority, category, message, action }
function SystemAuditor:generate_security_recommendations(security)
    local recommendations = {}
    recommendations[1] = {
        priority = "medium",
        category = "validation",
        message = "加强输入验证",
        action = "实施严格的输入验证和清理机制"
    }
    recommendations[2] = {
        priority = "low",
        category = "monitoring",
        message = "建立安全监控",
        action = "实施安全事件监控和日志记录"
    }
    return recommendations
end

-- Produce architecture recommendations (static list in the current
-- implementation; the argument is accepted for interface symmetry).
-- @tparam table architecture architecture audit results (currently unused)
-- @treturn table array of { priority, category, message, action }
function SystemAuditor:generate_architecture_recommendations(architecture)
    local refactoring_advice = {
        priority = "medium",
        category = "refactoring",
        message = "进一步模块化复杂组件",
        action = "将大型组件拆分为更小、更专注的模块"
    }
    return { refactoring_advice }
end

-- Produce cross-cutting recommendations for the whole audit (static list
-- in the current implementation; the argument is accepted for symmetry).
-- @tparam table audit_results the full audit result (currently unused)
-- @treturn table array of { priority, category, message, action }
function SystemAuditor:generate_overall_recommendations(audit_results)
    local recommendations = {}
    table.insert(recommendations, {
        priority = "high",
        category = "overall",
        message = "建立全面的系统监控体系",
        action = "集成性能、安全和架构监控"
    })
    table.insert(recommendations, {
        priority = "medium",
        category = "process",
        message = "实施定期系统审计",
        action = "建立定期的代码和系统质量审计流程"
    })
    return recommendations
end

-- 性能瓶颈识别
-- Flatten every benchmark's issue list into a single bottleneck list,
-- tagging each entry with the benchmark it came from.
-- @tparam table benchmarks array of benchmark reports (each may carry issues)
-- @treturn table array of { type, severity, message, component }
function SystemAuditor:identify_performance_bottlenecks(benchmarks)
    local bottlenecks = {}

    for _, benchmark in ipairs(benchmarks) do
        local issues = benchmark.issues or {}
        for _, issue in ipairs(issues) do
            bottlenecks[#bottlenecks + 1] = {
                type = issue.type,
                severity = issue.severity,
                message = issue.message,
                component = benchmark.name
            }
        end
    end

    return bottlenecks
end

-- 风险评估
-- Tally confirmed findings by severity and derive an overall risk label.
-- @tparam table vulnerabilities array of finding records
-- @treturn table { risk_levels = counts-per-severity, overall_risk = label }
function SystemAuditor:assess_security_risks(vulnerabilities)
    local counts = {
        critical = 0,
        high = 0,
        medium = 0,
        low = 0,
        info = 0
    }

    for _, vuln in ipairs(vulnerabilities) do
        if vuln.found then
            -- `or 0` tolerates severities outside the five standard levels.
            counts[vuln.severity] = (counts[vuln.severity] or 0) + 1
        end
    end

    return {
        risk_levels = counts,
        overall_risk = self:calculate_overall_risk(counts)
    }
end

-- Convert severity tallies into a weighted score and map it to a
-- human-readable risk label (>=50 high, >=25 medium, >=10 low).
-- @tparam table risk_levels counts keyed by critical/high/medium/low/info
-- @treturn string risk label
function SystemAuditor:calculate_overall_risk(risk_levels)
    local weights = { critical = 25, high = 15, medium = 8, low = 3, info = 1 }
    local risk_score = 0
    for level, weight in pairs(weights) do
        risk_score = risk_score + risk_levels[level] * weight
    end

    if risk_score >= 50 then
        return "高风险"
    elseif risk_score >= 25 then
        return "中等风险"
    elseif risk_score >= 10 then
        return "低风险"
    end
    return "极低风险"
end

-- 创建新实例
-- Create an independent auditor instance with its own config and state.
-- Fixes: (1) the previous version set `state = {}`, so the instance's
-- state shadowed the prototype's and lacked the declared sub-tables
-- (performance_metrics, security_issues, ...) — accessing them on a new
-- instance yielded nil; now the prototype's state shape is deep-copied.
-- (2) the `config` parameter was accepted but silently ignored; it is now
-- merged into the instance's config copy (backward-compatible: callers
-- passing nil see identical behavior).
-- @tparam[opt] table config overrides merged into the instance's config
-- @treturn table new SystemAuditor instance
function SystemAuditor:new(config)
    local instance = {
        config = TableUtils.deep_copy(self.config),
        state = TableUtils.deep_copy(self.state)
    }

    setmetatable(instance, {__index = self})

    if config then
        TableUtils.merge(instance.config, config)
    end

    return instance
end

return SystemAuditor