"""
Thompson Sampling implementation for contextual bandit tool selection.

This module implements Thompson Sampling as described in Algorithm 3.2 of the
theoretical framework, providing Bayesian exploration with O(d√T) regret bounds.
"""

using LinearAlgebra
using Random
using Distributions
using Statistics

# ============================================================================
# Thompson Sampling Implementation
# ============================================================================

"""
    ThompsonSamplingAgent <: AbstractBandit

Contextual bandit using Thompson Sampling with Gaussian priors.
Implements Bayesian exploration with optimal regret bounds.
"""
mutable struct ThompsonSamplingAgent <: AbstractBandit
    d::Int  # feature dimension
    k::Int  # number of arms (tools)

    # Prior parameters (shared by all arms)
    prior_mean::Vector{Float64}
    prior_precision::Matrix{Float64}  # inverse covariance
    noise_precision::Float64          # inverse noise variance

    # Per-arm posterior parameters
    posterior_mean::Vector{Vector{Float64}}       # μ_i for each arm
    posterior_precision::Vector{Matrix{Float64}}  # Λ_i for each arm

    # Sampling and tracking
    rng::MersenneTwister
    total_rounds::Int
    arm_counts::Vector{Int}
    cumulative_reward::Float64

    # Performance tracking
    sampled_parameters::Vector{Vector{Vector{Float64}}}  # history of sampled θ per round
    posterior_uncertainties::Vector{Vector{Float64}}     # per-round posterior std along context

    function ThompsonSamplingAgent(d::Int, k::Int;
                                  prior_mean::Vector{Float64} = zeros(d),
                                  prior_variance::Float64 = 1.0,
                                  noise_variance::Float64 = 1.0,
                                  random_seed::Int = 1234)
        # Validate inputs at the API boundary.
        d > 0 || throw(ArgumentError("feature dimension d must be positive, got $d"))
        k > 0 || throw(ArgumentError("number of arms k must be positive, got $k"))
        prior_variance > 0 || throw(ArgumentError("prior_variance must be positive, got $prior_variance"))
        noise_variance > 0 || throw(ArgumentError("noise_variance must be positive, got $noise_variance"))
        length(prior_mean) == d ||
            throw(DimensionMismatch("prior_mean has length $(length(prior_mean)), expected $d"))

        # Isotropic Gaussian prior: Λ₀ = I / σ²_prior.  Build a dense matrix
        # explicitly (I(d)/σ² would be a Diagonal) so rank-one updates stay in-type.
        prior_precision_matrix = Matrix{Float64}(I, d, d) / prior_variance
        noise_prec = 1.0 / noise_variance

        # Every arm starts at the shared prior; copy so arms update independently.
        posterior_means = [copy(prior_mean) for _ in 1:k]
        posterior_precisions = [copy(prior_precision_matrix) for _ in 1:k]

        rng = MersenneTwister(random_seed)

        # copy(prior_mean) avoids aliasing the caller's keyword-argument vector.
        new(d, k, copy(prior_mean), prior_precision_matrix, noise_prec,
            posterior_means, posterior_precisions, rng,
            0, zeros(Int, k), 0.0, Vector{Vector{Vector{Float64}}}(), Vector{Vector{Float64}}())
    end
end

"""
    select_arm(agent::ThompsonSamplingAgent, context::Vector{Float64}) -> Tuple{Int, Float64}

Select arm using Thompson Sampling: draw θ̂_i from each arm's posterior and pick
the arm maximizing `dot(context, θ̂_i)`.
Returns `(arm_index, posterior_uncertainty)`, where the uncertainty is the
posterior standard deviation of the reward prediction along `context`.
"""
function select_arm(agent::ThompsonSamplingAgent, context::Vector{Float64})
    length(context) == agent.d ||
        throw(DimensionMismatch("context has length $(length(context)), expected $(agent.d)"))

    sampled_rewards = zeros(agent.k)
    sampled_params = Vector{Vector{Float64}}(undef, agent.k)
    uncertainties = zeros(agent.k)

    for i in 1:agent.k
        # Sample from posterior and score the context under the sample.
        θ_sample = sample_posterior(agent, i)
        sampled_params[i] = θ_sample
        sampled_rewards[i] = dot(context, θ_sample)

        # Posterior std of x'θ is sqrt(x' Λ⁻¹ x).  Use a linear solve instead of
        # an explicit inverse; the clamp guards against tiny negative values
        # produced by floating-point round-off.
        try
            quad = dot(context, agent.posterior_precision[i] \ context)
            uncertainties[i] = sqrt(max(quad, 0.0))
        catch e
            @warn "Numerical issue computing uncertainty for arm $i" exception=e
            uncertainties[i] = 1.0  # Fallback uncertainty
        end
    end

    # Thompson Sampling rule: act greedily w.r.t. the sampled parameters.
    selected_arm = argmax(sampled_rewards)

    # Store sampling history for diagnostics.
    push!(agent.sampled_parameters, copy(sampled_params))
    push!(agent.posterior_uncertainties, copy(uncertainties))

    return selected_arm, uncertainties[selected_arm]
end

"""
    sample_posterior(agent::ThompsonSamplingAgent, arm::Int) -> Vector{Float64}

Sample a parameter vector from the Gaussian posterior of `arm`.
Falls back to sampling from the prior if the posterior covariance is not
numerically usable.
"""
function sample_posterior(agent::ThompsonSamplingAgent, arm::Int)
    try
        # Posterior covariance Σ = Λ⁻¹.  Symmetrize explicitly: inv() can return
        # a slightly asymmetric matrix, which trips MvNormal's PD check and
        # would needlessly force the prior fallback.
        posterior_cov = Matrix(Symmetric(inv(agent.posterior_precision[arm])))
        return rand(agent.rng, MvNormal(agent.posterior_mean[arm], posterior_cov))

    catch e
        @warn "Failed to sample from posterior for arm $arm, using prior" exception=e

        # Fallback to prior (symmetrized for the same reason as above).
        prior_cov = Matrix(Symmetric(inv(agent.prior_precision)))
        return rand(agent.rng, MvNormal(agent.prior_mean, prior_cov))
    end
end

"""
    update_bandit!(agent::ThompsonSamplingAgent, arm::Int, reward::Float64, context::Vector{Float64})

Update the posterior of `arm` with the observed `(context, reward)` pair using
the conjugate Gaussian update:

    Λ_new = Λ_old + β x xᵀ
    μ_new = Λ_new⁻¹ (Λ_old μ_old + β r x)

where β is the noise precision.
"""
function update_bandit!(agent::ThompsonSamplingAgent, arm::Int, reward::Float64, context::Vector{Float64})
    1 <= arm <= agent.k || throw(ArgumentError("invalid arm index $arm (k = $(agent.k))"))
    length(context) == agent.d ||
        throw(DimensionMismatch("context has length $(length(context)), expected $(agent.d)"))

    # Capture Λ_old * μ_old BEFORE mutating the precision, instead of
    # reconstructing Λ_old afterwards by subtracting the rank-one term back out
    # (which was numerically wasteful and lossy).
    weighted_old = agent.posterior_precision[arm] * agent.posterior_mean[arm]

    # Precision update: Λ += β x xᵀ.
    agent.posterior_precision[arm] += agent.noise_precision * (context * context')

    try
        weighted_new = agent.noise_precision * reward * context
        # Linear solve instead of an explicit inverse.
        agent.posterior_mean[arm] = agent.posterior_precision[arm] \ (weighted_old + weighted_new)

    catch e
        @warn "Numerical issue in posterior update for arm $arm" exception=e

        # Fallback: crude stochastic-approximation step toward the observed reward.
        n = agent.arm_counts[arm] + 1
        old_mean = agent.posterior_mean[arm]
        direction = reward * context - dot(context, old_mean) * context
        agent.posterior_mean[arm] += direction / (n + 1)
    end

    # Update counters
    agent.total_rounds += 1
    agent.arm_counts[arm] += 1
    agent.cumulative_reward += reward
end

"""
    get_regret_bound(agent::ThompsonSamplingAgent, horizon::Int, confidence::Float64 = 0.05) -> Float64

Compute theoretical regret bound for Thompson Sampling.
Returns the O(d√T) bound from Theorem 3.2.

# Throws
- `ArgumentError` if `confidence` is not in the open interval (0, 1)
  (otherwise `sqrt(2 log(1/confidence))` would be a DomainError or zero).
"""
function get_regret_bound(agent::ThompsonSamplingAgent, horizon::Int, confidence::Float64 = 0.05)
    # Non-positive horizons accrue no regret.
    horizon <= 0 && return 0.0

    0.0 < confidence < 1.0 ||
        throw(ArgumentError("confidence must lie in (0, 1), got $confidence"))

    # Theoretical bound: O(d√T); the constant depends on the confidence level.
    c = sqrt(2 * log(1 / confidence))
    return c * agent.d * sqrt(horizon)
end

"""
    compute_information_gain(agent::ThompsonSamplingAgent, context::Vector{Float64}) -> Vector{Float64}

Compute the expected information gain for each arm if `context` were observed,
measured as half the change in log-determinant of the posterior precision.
"""
function compute_information_gain(agent::ThompsonSamplingAgent, context::Vector{Float64})
    gains = zeros(agent.k)
    # The rank-one term β x xᵀ is the same for every arm; build it once.
    rank_one = agent.noise_precision * (context * context')

    for arm in 1:agent.k
        try
            Λ = agent.posterior_precision[arm]
            # ΔH = ½ (log det(Λ + β x xᵀ) − log det Λ), entropy change up to constants.
            gains[arm] = (logdet(Λ + rank_one) - logdet(Λ)) / 2
        catch e
            @debug "Could not compute information gain for arm $arm" exception=e
            gains[arm] = 0.0
        end
    end

    return gains
end

"""
    compute_posterior_variance(agent::ThompsonSamplingAgent, arm::Int, context::Vector{Float64}) -> Float64

Compute the posterior predictive variance for `arm` at `context`:
noise variance plus the parameter-uncertainty term `x' Λ⁻¹ x`.
Returns a fallback of 1.0 if the computation fails numerically.
"""
function compute_posterior_variance(agent::ThompsonSamplingAgent, arm::Int, context::Vector{Float64})
    try
        # x' Λ⁻¹ x via linear solve (no explicit inverse); clamp guards against
        # tiny negative values from floating-point round-off.
        param_variance = max(dot(context, agent.posterior_precision[arm] \ context), 0.0)

        # Predictive variance = noise variance + parameter-uncertainty term.
        return 1.0 / agent.noise_precision + param_variance

    catch e
        @debug "Could not compute posterior variance for arm $arm" exception=e
        return 1.0  # Fallback
    end
end

"""
    get_arm_statistics(agent::ThompsonSamplingAgent, arm::Int) -> NamedTuple

Get detailed statistics for a specific arm: posterior mean, covariance,
per-coordinate standard deviations, selection count and frequency.
Falls back to an identity covariance / unit std if inversion fails.
"""
function get_arm_statistics(agent::ThompsonSamplingAgent, arm::Int)
    1 <= arm <= agent.k || throw(ArgumentError("invalid arm index $arm (k = $(agent.k))"))

    try
        posterior_cov = inv(agent.posterior_precision[arm])
        # Clamp: round-off can make diagonal entries slightly negative,
        # which would make sqrt throw and force the fallback branch.
        posterior_std = sqrt.(max.(diag(posterior_cov), 0.0))

        return (
            posterior_mean = copy(agent.posterior_mean[arm]),
            posterior_covariance = posterior_cov,
            posterior_std = posterior_std,
            num_selections = agent.arm_counts[arm],
            selection_frequency = agent.arm_counts[arm] / max(agent.total_rounds, 1)
        )

    catch e
        @warn "Could not compute statistics for arm $arm" exception=e
        # Dense Float64 identity keeps the NamedTuple consistent with the
        # success branch (bare I(d) would be a Diagonal{Bool}).
        return (
            posterior_mean = copy(agent.posterior_mean[arm]),
            posterior_covariance = Matrix{Float64}(I, agent.d, agent.d),
            posterior_std = ones(agent.d),
            num_selections = agent.arm_counts[arm],
            selection_frequency = agent.arm_counts[arm] / max(agent.total_rounds, 1)
        )
    end
end

# ============================================================================
# Advanced Thompson Sampling Variants
# ============================================================================

"""
    ThompsonSamplingWithResets <: AbstractBandit

Thompson Sampling with periodic resets to handle non-stationary environments.
Resets fire either on a fixed round schedule or when recent performance drops
below the historical average by more than a threshold.
"""
mutable struct ThompsonSamplingWithResets <: AbstractBandit
    base_agent::ThompsonSamplingAgent
    reset_frequency::Int                 # hard cap on rounds between resets
    rounds_since_reset::Int
    reset_threshold::Float64             # performance drop that triggers a reset
    performance_window::Vector{Float64}  # sliding window of recent rewards
    window_size::Int

    function ThompsonSamplingWithResets(d::Int, k::Int;
                                       reset_frequency::Int = 1000,
                                       reset_threshold::Float64 = 0.1,
                                       window_size::Int = 100,
                                       kwargs...)
        # Any remaining keywords are forwarded to the underlying agent.
        inner = ThompsonSamplingAgent(d, k; kwargs...)
        return new(inner, reset_frequency, 0, reset_threshold, Float64[], window_size)
    end
end

"""
    select_arm(agent::ThompsonSamplingWithResets, context::Vector{Float64}) -> Tuple{Int, Float64}

Select an arm, first applying a reset if one is due.
"""
function select_arm(agent::ThompsonSamplingWithResets, context::Vector{Float64})
    # A reset (if triggered) softens the posteriors before selection.
    check_and_reset!(agent)
    # Delegate the actual Thompson Sampling step to the wrapped agent.
    return select_arm(agent.base_agent, context)
end

"""
    update_bandit!(agent::ThompsonSamplingWithResets, arm::Int, reward::Float64, context::Vector{Float64})

Update the wrapped agent and record the reward for reset monitoring.
"""
function update_bandit!(agent::ThompsonSamplingWithResets, arm::Int, reward::Float64, context::Vector{Float64})
    # Delegate the Bayesian update to the wrapped agent.
    update_bandit!(agent.base_agent, arm, reward, context)

    # Maintain a fixed-size sliding window of the most recent rewards.
    window = agent.performance_window
    push!(window, reward)
    length(window) > agent.window_size && popfirst!(window)

    agent.rounds_since_reset += 1
end

"""
    check_and_reset!(agent::ThompsonSamplingWithResets)

Evaluate both reset triggers (elapsed rounds and recent performance drop)
and perform a reset if either fires.
"""
function check_and_reset!(agent::ThompsonSamplingWithResets)
    trigger = false

    # Trigger 1: too many rounds since the last reset.
    if agent.rounds_since_reset >= agent.reset_frequency
        trigger = true
        @info "Performing time-based reset after $(agent.rounds_since_reset) rounds"
    end

    # Trigger 2: recent average reward fell below the historical average
    # by more than the configured threshold (only once the window is full).
    if length(agent.performance_window) >= agent.window_size
        recent_performance = mean(agent.performance_window)
        historical_performance = agent.base_agent.cumulative_reward / max(agent.base_agent.total_rounds, 1)

        if recent_performance < historical_performance - agent.reset_threshold
            trigger = true
            @info "Performing performance-based reset (recent: $recent_performance, historical: $historical_performance)"
        end
    end

    if trigger
        reset_agent!(agent)
    end
end

"""
    reset_agent!(agent::ThompsonSamplingWithResets)

Perform a soft reset: shrink every arm's posterior toward the prior so past
knowledge is partially retained while uncertainty is re-inflated.
"""
function reset_agent!(agent::ThompsonSamplingWithResets)
    base = agent.base_agent
    λ = 0.3  # fraction of learned information retained through the reset

    for arm in 1:base.k
        # Convex combination of the current posterior and the prior,
        # applied to both the mean and the precision.
        base.posterior_mean[arm] = λ * base.posterior_mean[arm] + (1 - λ) * base.prior_mean
        base.posterior_precision[arm] = λ * base.posterior_precision[arm] + (1 - λ) * base.prior_precision
    end

    # Restart the reset clock and drop the performance history.
    agent.rounds_since_reset = 0
    agent.performance_window = Float64[]
end

# ============================================================================
# Ensemble Thompson Sampling
# ============================================================================

"""
    EnsembleThompsonSampling <: AbstractBandit

Ensemble of Thompson Sampling agents for robust performance.
Members are diversified via different prior/noise variances and RNG seeds.
"""
struct EnsembleThompsonSampling <: AbstractBandit
    agents::Vector{ThompsonSamplingAgent}
    ensemble_size::Int
    voting_method::Symbol  # :majority, :weighted, or :confidence

    function EnsembleThompsonSampling(d::Int, k::Int, ensemble_size::Int = 5;
                                    voting_method::Symbol = :weighted)
        # Sweep prior variance over (0.5, 1.0] and noise variance over (0.8, 1.2]
        # with distinct seeds so members explore differently on the same contexts.
        members = [
            ThompsonSamplingAgent(d, k;
                                prior_variance = 0.5 + 0.5 * (i / ensemble_size),
                                noise_variance = 0.8 + 0.4 * (i / ensemble_size),
                                random_seed = 1000 + i)
            for i in 1:ensemble_size
        ]

        new(members, ensemble_size, voting_method)
    end
end

"""
    select_arm(ensemble::EnsembleThompsonSampling, context::Vector{Float64}) -> Tuple{Int, Float64}

Select arm using ensemble voting (`:majority`, `:weighted`, or `:confidence`).
Returns `(arm_index, uncertainty)` where the uncertainty is the inverse of the
aggregate ensemble confidence.
"""
function select_arm(ensemble::EnsembleThompsonSampling, context::Vector{Float64})
    # Query every member; confidence = inverse posterior uncertainty.
    selections = [select_arm(agent, context) for agent in ensemble.agents]
    arms = [sel[1] for sel in selections]
    confidences = [1.0 / (sel[2] + 1e-6) for sel in selections]  # inverse uncertainty
    k = ensemble.agents[1].k

    if ensemble.voting_method == :majority
        # Tally votes in a fixed-size vector: deterministic lowest-index
        # tie-breaking, unlike argmax over a Dict (hash-iteration order).
        votes = zeros(Int, k)
        for arm in arms
            votes[arm] += 1
        end
        selected_arm = argmax(votes)
        avg_confidence = mean(confidences)

    elseif ensemble.voting_method == :weighted
        # Confidence-weighted voting.
        weighted_votes = zeros(k)
        for (arm, confidence) in zip(arms, confidences)
            weighted_votes[arm] += confidence
        end

        selected_arm = argmax(weighted_votes)
        avg_confidence = mean(confidences)

    else  # :confidence — defer entirely to the most confident member
        best_agent_idx = argmax(confidences)
        selected_arm = arms[best_agent_idx]
        avg_confidence = confidences[best_agent_idx]
    end

    return selected_arm, 1.0 / (avg_confidence + 1e-6)  # convert back to uncertainty
end

"""
    update_bandit!(ensemble::EnsembleThompsonSampling, arm::Int, reward::Float64, context::Vector{Float64})

Propagate the observed reward to every member of the ensemble.
"""
function update_bandit!(ensemble::EnsembleThompsonSampling, arm::Int, reward::Float64, context::Vector{Float64})
    # Each member performs its own conjugate posterior update.
    foreach(member -> update_bandit!(member, arm, reward, context), ensemble.agents)
end

"""
    get_regret_bound(ensemble::EnsembleThompsonSampling, horizon::Int, confidence::Float64 = 0.05) -> Float64

Compute an ensemble regret bound: a representative member's bound scaled by a
heuristic √m/m averaging discount (m = ensemble size).
"""
function get_regret_bound(ensemble::EnsembleThompsonSampling, horizon::Int, confidence::Float64 = 0.05)
    # Use the first member as the representative individual bound.
    individual_bound = get_regret_bound(ensemble.agents[1], horizon, confidence)

    # Heuristic improvement factor from averaging over m members.
    shrink = sqrt(ensemble.ensemble_size) / ensemble.ensemble_size
    return individual_bound * shrink
end

# ============================================================================
# Utility Functions
# ============================================================================

"""
    compare_bandit_performance(agents::Vector{<:AbstractBandit}, contexts::Vector{Vector{Float64}},
                             rewards_function::Function, num_rounds::Int) -> NamedTuple

Compare performance of multiple bandit algorithms on a shared context stream.

# Arguments
- `agents`: bandit agents; each must support `select_arm` and `update_bandit!`.
- `contexts`: context vectors; round `t` uses `contexts[min(t, end)]`.
- `rewards_function`: `(context, arm) -> reward` oracle, also used for the
  per-round optimal reward in the regret computation.
- `num_rounds`: number of interaction rounds (must be positive).

# Returns
NamedTuple with per-agent `cumulative_rewards` and `regrets` (cumulative
series), `final_average_rewards` (mean reward per round), and `final_regrets`.
"""
function compare_bandit_performance(agents::Vector{<:AbstractBandit}, contexts::Vector{Vector{Float64}},
                                   rewards_function::Function, num_rounds::Int)
    num_rounds > 0 || throw(ArgumentError("num_rounds must be positive, got $num_rounds"))
    isempty(contexts) && throw(ArgumentError("contexts must be non-empty"))

    num_agents = length(agents)
    cumulative_rewards = [Float64[] for _ in 1:num_agents]
    regrets = [Float64[] for _ in 1:num_agents]

    for t in 1:num_rounds
        context = contexts[min(t, length(contexts))]

        # Best achievable reward this round, for regret accounting.
        optimal_reward = maximum(rewards_function(context, i) for i in 1:agents[1].k)

        for (i, agent) in enumerate(agents)
            # Select, observe, learn.
            selected_arm, _ = select_arm(agent, context)
            reward = rewards_function(context, selected_arm)
            update_bandit!(agent, selected_arm, reward, context)

            # Extend the cumulative reward / regret series.
            prev_reward = isempty(cumulative_rewards[i]) ? 0.0 : cumulative_rewards[i][end]
            push!(cumulative_rewards[i], prev_reward + reward)

            prev_regret = isempty(regrets[i]) ? 0.0 : regrets[i][end]
            push!(regrets[i], prev_regret + (optimal_reward - reward))
        end
    end

    return (
        cumulative_rewards = cumulative_rewards,
        regrets = regrets,
        # BUG FIX: the series is already cumulative, so the average reward per
        # round is the final total divided by the round count — NOT the sum of
        # all partial sums divided by the round count.
        final_average_rewards = [series[end] / num_rounds for series in cumulative_rewards],
        final_regrets = [series[end] for series in regrets]
    )
end