using Test
using Pkg
using Statistics  # provides `mean` used in the confidence-interval test

# Anchor all paths to this file's directory so the suite works regardless of
# the current working directory (e.g. launched via the REPL or an IDE runner).
# The original relative paths ("..", "../src/...") only worked when cwd == test/.
Pkg.activate(joinpath(@__DIR__, ".."))

# Load the package source directly and bring its exported names into scope.
include(joinpath(@__DIR__, "..", "src", "ClaudeCodeTheory.jl"))
using .ClaudeCodeTheory

@testset "ClaudeCodeTheory.jl Tests" begin
    
    @testset "Core Types" begin
        # An empty filesystem state should construct and carry the right type.
        state = FileSystemState(Dict{String,String}(), Set{String}(), Dict{String,UInt8}(), Dict{String,Float64}())
        @test state isa FileSystemState

        # Quality-metric fields round-trip through the constructor.
        metrics = QualityMetrics(0.8, 0.7, 0.9, 0.6)
        @test metrics.correctness == 0.8
        @test metrics.readability == 0.7

        # Aggregate utility = weighted reward minus the weighted cost terms.
        w = [1.0, 0.5, 0.3, 0.2]
        mou = MultiObjectiveUtility(w, 0.8, 2.0, 1.5, 0.1)
        @test mou.utility_value ≈ 0.8 * 1.0 - 2.0 * 0.5 - 1.5 * 0.3 - 0.1 * 0.2
    end
    
    @testset "LinUCB Agent" begin
        # Fresh agent over 5-dimensional contexts with 3 arms.
        bandit = LinUCBAgent(5, 3, 1.0)
        @test bandit.d == 5
        @test bandit.k == 3
        @test length(bandit.arm_counts) == 3

        # Arm selection must return a valid arm index and a nonnegative bonus.
        ctx = [1.0, 0.5, -0.3, 0.8, 0.2]
        chosen, conf = select_arm(bandit, ctx)
        @test chosen in 1:3
        @test conf >= 0

        # A single reward update bumps the round counter and the arm's count.
        update_bandit!(bandit, chosen, 0.7, ctx)
        @test bandit.total_rounds == 1
        @test bandit.arm_counts[chosen] == 1
    end
    
    @testset "Thompson Sampling Agent" begin
        # Agent over 4-dimensional contexts with 2 arms.
        ts = ThompsonSamplingAgent(4, 2)
        @test ts.d == 4
        @test ts.k == 2

        # Selection returns a valid arm plus a nonnegative uncertainty score.
        ctx = [0.5, -0.2, 1.1, 0.3]
        chosen, unc = select_arm(ts, ctx)
        @test chosen in 1:2
        @test unc >= 0

        # One update advances the total round counter.
        update_bandit!(ts, chosen, 0.3, ctx)
        @test ts.total_rounds == 1
    end
    
    @testset "Feature Extraction" begin
        # Extractor configured for 8 features with matching name list.
        fx = ContextFeatureExtractor(8)
        @test fx.feature_dim == 8
        @test length(fx.feature_names) == 8

        # Feed one observation through the running-normalization update...
        raw = [1.0, 2.0, -1.0, 0.5, 3.0, -0.5, 0.8, 1.2]
        update_normalization!(fx, raw)

        # ...then verify the normalized vector keeps its length and is clamped to ±5.
        scaled = normalize_features(fx, raw)
        @test length(scaled) == 8
        @test all(abs.(scaled) .<= 5.0)
    end
    
    @testset "Utility Functions" begin
        # softmax output must be a valid probability distribution.
        logits = [1.0, 2.0, 0.5]
        p = softmax(logits, 1.0)
        @test length(p) == 3
        @test sum(p) ≈ 1.0
        @test minimum(p) >= 0

        # The 95% confidence interval should be ordered and bracket the mean.
        data = [1.0, 2.0, 1.5, 2.2, 1.8, 2.1, 1.9]
        lo, hi = confidence_interval(data, 0.95)
        @test lo < hi
        @test lo <= mean(data) <= hi

        # Pareto dominance is asymmetric: a strictly better vector dominates.
        a = [3.0, 2.0, 1.0]
        b = [2.0, 2.0, 1.0]
        @test pareto_dominates(a, b)
        @test !pareto_dominates(b, a)
    end
    
    @testset "Tool System" begin
        # Build a minimal safe file tool and verify its descriptive fields.
        t = Tool(
            "test_tool", FILE_TOOLS, "Test tool",
            Dict("param1" => String), Dict("result" => Bool),
            SAFE, 1.0, 0.5, true, Set{String}()
        )
        @test t.name == "test_tool"
        @test t.category == FILE_TOOLS
        @test t.safety_level == SAFE

        # An action space over that single tool indexes it by name.
        space = ClaudeCodeActionSpace([t], 2)
        @test length(space.tools) == 1
        @test haskey(space.tool_index, "test_tool")

        # Creating an action binds the tool and carries the parameters through.
        args = Dict{String, Any}("param1" => "test_value")
        act = create_action(space, "test_tool", args)
        @test act !== nothing
        @test act.tool.name == "test_tool"
        @test act.parameters["param1"] == "test_value"
    end
end