using System;
using System.Collections.Generic;
using System.Linq;

namespace XSResearch
{
    /// <summary>
    /// Enhanced research agent using Deep Q-Learning with a prioritized
    /// experience replay buffer.
    /// </summary>
    public class ResearchAgent : Agent
    {
        // Number of discrete actions; also the Q-network's output size.
        private const int ActionCount = 3;

        // Length of the preprocessed state vector fed to the network.
        private const int StateVectorSize = 100;

        private double explorationRate;
        private double learningRate;
        private double discountFactor;
        private readonly NeuralNetwork neuralNetwork;
        private readonly PriorityExperienceReplay memory;

        // Single RNG reused across calls. Creating `new Random()` on every
        // decision (as naive code does) is wasteful and, on older runtimes,
        // can yield identical time-based seeds and correlated draws.
        private readonly Random random = new();

        /// <summary>
        /// Creates the agent with default hyperparameters, a small Q-network
        /// and a prioritized replay buffer.
        /// </summary>
        /// <param name="name">Agent name, forwarded to the base class.</param>
        /// <param name="xr">XR interface used to receive environment data.</param>
        /// <param name="da">Analyzer used to execute actions on the current state.</param>
        public ResearchAgent(string name, XRInterface xr, DataAnalyzer da)
            : base(name, xr, da)
        {
            // Default hyperparameters.
            explorationRate = 0.3;
            learningRate = 0.001;
            discountFactor = 0.95;

            neuralNetwork = new NeuralNetwork(
                inputSize: StateVectorSize,
                hiddenLayers: new[] { 64, 32 },
                outputSize: ActionCount);

            memory = new PriorityExperienceReplay(capacity: 10000);
        }

        /// <summary>ε of the ε-greedy policy; clamped to [0.01, 1.0].</summary>
        public double ExplorationRate
        {
            get => explorationRate;
            set => explorationRate = Math.Clamp(value, 0.01, 1.0);
        }

        /// <summary>Training step size; clamped to [0.0001, 0.1].</summary>
        public double LearningRate
        {
            get => learningRate;
            set => learningRate = Math.Clamp(value, 0.0001, 0.1);
        }

        /// <summary>Reward discount factor γ; clamped to [0.0, 1.0].</summary>
        public double DiscountFactor
        {
            get => discountFactor;
            set => discountFactor = Math.Clamp(value, 0.0, 1.0);
        }

        /// <summary>
        /// Reads the latest XR environment data and stores its preprocessed
        /// vector as the current state.
        /// </summary>
        public override void Perceive()
        {
            var state = xrInterface.ReceiveXRData();
            CurrentState = PreprocessState(state);
        }

        /// <summary>
        /// Selects the next action using an ε-greedy policy, then decays ε.
        /// </summary>
        public override void Decide()
        {
            if (random.NextDouble() < explorationRate)
            {
                // Explore: uniformly random action.
                CurrentAction = random.Next(0, ActionCount);
            }
            else
            {
                // Exploit: action with the highest predicted Q-value.
                var qValues = neuralNetwork.Predict(CurrentState);
                CurrentAction = qValues.IndexOf(qValues.Max());
            }

            // Multiplicative decay with a floor so exploration never stops entirely.
            explorationRate = Math.Max(explorationRate * 0.995, 0.01);
        }

        /// <summary>
        /// Executes the chosen action via the analyzer and records the
        /// resulting transition in the replay buffer.
        /// </summary>
        public override void Act()
        {
            var result = dataAnalyzer.AnalyzeXRData(CurrentState);

            memory.Add(new Experience(
                state: CurrentState,
                action: CurrentAction,
                reward: CalculateReward(result),
                nextState: PreprocessState(result)
            ));
        }

        /// <summary>
        /// Samples a prioritized minibatch and trains the Q-network on it.
        /// Does nothing while the replay buffer is still empty.
        /// </summary>
        public override void Learn()
        {
            var batch = memory.Sample(batchSize: 32);
            if (batch.Count == 0)
            {
                // Nothing recorded yet — avoid training on an empty batch.
                return;
            }

            neuralNetwork.Train(batch, learningRate, discountFactor);

            // Refresh priorities so informative transitions are replayed more often.
            memory.UpdatePriorities(batch);
        }

        // Converts raw XR data into the fixed-size network input vector.
        // NOTE: placeholder implementation — returns an all-zero vector.
        private float[] PreprocessState(object rawState)
        {
            return new float[StateVectorSize];
        }

        // Computes the scalar reward for an analysis result.
        // NOTE: placeholder implementation — returns a constant reward.
        private float CalculateReward(object result)
        {
            return 0.5f;
        }
    }

    /// <summary>
    /// Prioritized experience replay buffer: a bounded FIFO store of
    /// experiences, each carrying a sampling priority.
    /// </summary>
    public class PriorityExperienceReplay
    {
        // Upper bound on any priority; keeps the repeated *1.1 updates from
        // growing without bound and dominating the sampling distribution.
        private const float MaxPriority = 100.0f;

        private readonly List<(Experience exp, float priority)> buffer;
        private readonly int capacity;
        private readonly Random random = new();

        /// <summary>
        /// Creates a buffer holding at most <paramref name="capacity"/> experiences.
        /// </summary>
        /// <param name="capacity">Maximum number of stored experiences; must be positive.</param>
        /// <exception cref="ArgumentOutOfRangeException">If capacity is not positive.</exception>
        public PriorityExperienceReplay(int capacity = 10000)
        {
            if (capacity <= 0)
            {
                throw new ArgumentOutOfRangeException(nameof(capacity), capacity,
                    "Capacity must be positive.");
            }
            this.capacity = capacity;
            buffer = new List<(Experience, float)>(capacity);
        }

        /// <summary>
        /// Adds an experience, evicting the oldest entry when the buffer is full.
        /// New experiences enter at the current maximum priority so they are
        /// guaranteed to be sampled at least once.
        /// </summary>
        public void Add(Experience experience)
        {
            if (buffer.Count >= capacity)
            {
                buffer.RemoveAt(0); // FIFO eviction of the oldest entry.
            }

            var priority = buffer.Count > 0 ? buffer.Max(x => x.priority) : 1.0f;
            buffer.Add((experience, priority));
        }

        /// <summary>
        /// Draws up to <paramref name="batchSize"/> distinct experiences,
        /// each chosen with probability proportional to its priority
        /// (roulette-wheel selection without replacement). Returns fewer
        /// items when the buffer holds fewer than <paramref name="batchSize"/>.
        /// </summary>
        public List<Experience> Sample(int batchSize)
        {
            var result = new List<Experience>();
            if (batchSize <= 0 || buffer.Count == 0)
            {
                return result;
            }

            // Candidate indices into the buffer; shrinks as items are drawn,
            // giving sampling without replacement.
            var candidates = Enumerable.Range(0, buffer.Count).ToList();

            while (result.Count < batchSize && candidates.Count > 0)
            {
                float total = 0f;
                foreach (var i in candidates)
                {
                    total += buffer[i].priority;
                }

                // Spin the roulette wheel: pick the first candidate whose
                // cumulative priority covers the random target.
                double target = random.NextDouble() * total;
                double cumulative = 0.0;
                int chosenPos = candidates.Count - 1; // fallback guards float rounding
                for (int pos = 0; pos < candidates.Count; pos++)
                {
                    cumulative += buffer[candidates[pos]].priority;
                    if (cumulative >= target)
                    {
                        chosenPos = pos;
                        break;
                    }
                }

                result.Add(buffer[candidates[chosenPos]].exp);
                candidates.RemoveAt(chosenPos);
            }

            return result;
        }

        /// <summary>
        /// Boosts the priority of each experience in <paramref name="batch"/>
        /// by 10%, clamped to an upper bound. Experiences already evicted from
        /// the buffer are silently skipped.
        /// </summary>
        public void UpdatePriorities(List<Experience> batch)
        {
            foreach (var exp in batch)
            {
                // Linear scan; acceptable for small batches against a 10k buffer.
                var index = buffer.FindIndex(x => x.exp.Equals(exp));
                if (index >= 0)
                {
                    var boosted = Math.Min(buffer[index].priority * 1.1f, MaxPriority);
                    buffer[index] = (exp, boosted);
                }
            }
        }
    }
}