using System;
using System.Collections.Generic;
using System.Linq;

namespace XSResearch
{
    /// <summary>
    /// Simple fully-connected neural network implementation, used as a
    /// Q-value function approximator (see <see cref="Train"/>).
    /// </summary>
    public class NeuralNetwork
    {
        private readonly List<Layer> layers;
        private readonly Random random = new();

        /// <summary>
        /// Builds a network with the topology
        /// inputSize -> hiddenLayers[0] -> ... -> hiddenLayers[^1] -> outputSize.
        /// </summary>
        /// <param name="inputSize">Number of input features.</param>
        /// <param name="hiddenLayers">Sizes of the hidden layers; must contain at least one entry.</param>
        /// <param name="outputSize">Number of outputs (one Q-value per action).</param>
        /// <exception cref="ArgumentException">Thrown when <paramref name="hiddenLayers"/> is null or empty.</exception>
        public NeuralNetwork(int inputSize, int[] hiddenLayers, int outputSize)
        {
            // Guard before indexing: the original code would otherwise fail with an
            // unhelpful NullReferenceException / IndexOutOfRangeException below.
            if (hiddenLayers is null || hiddenLayers.Length == 0)
            {
                throw new ArgumentException("At least one hidden layer size is required.", nameof(hiddenLayers));
            }

            layers = new List<Layer>();

            // Input layer -> first hidden layer.
            layers.Add(new Layer(inputSize, hiddenLayers[0]));

            // Connections between consecutive hidden layers.
            for (int i = 1; i < hiddenLayers.Length; i++)
            {
                layers.Add(new Layer(hiddenLayers[i - 1], hiddenLayers[i]));
            }

            // Last hidden layer -> output layer.
            layers.Add(new Layer(hiddenLayers[^1], outputSize));

            InitializeWeights();
        }

        /// <summary>
        /// Initializes weights with Xavier/Glorot-style scaling: uniform in
        /// (-scale, scale) where scale = sqrt(2 / (fanIn + fanOut)).
        /// Biases keep their default value of zero.
        /// </summary>
        private void InitializeWeights()
        {
            foreach (var layer in layers)
            {
                var scale = Math.Sqrt(2.0 / (layer.InputSize + layer.OutputSize));
                for (int i = 0; i < layer.Weights.Length; i++)
                {
                    // NextDouble() * 2 - 1 yields a uniform value in (-1, 1).
                    layer.Weights[i] = (float)(random.NextDouble() * 2 - 1) * (float)scale;
                }
            }
        }

        /// <summary>
        /// Forward propagation: runs the input through every layer and returns
        /// the resulting Q-value estimates, one per output unit.
        /// </summary>
        public List<float> Predict(float[] input)
        {
            var current = input;
            foreach (var layer in layers)
            {
                current = layer.Forward(current);
            }
            return current.ToList();
        }

        /// <summary>
        /// Trains the network on a batch of experiences using the Q-learning
        /// target: reward + discountFactor * max(Q(nextState)).
        /// NOTE(review): the bootstrap term is always added — there is no
        /// terminal-state handling here; confirm whether Experience carries a
        /// "done" flag before relying on this for episodic tasks.
        /// </summary>
        public void Train(List<Experience> batch, double learningRate, double discountFactor)
        {
            foreach (var exp in batch)
            {
                // Current Q-value estimates for the experienced state.
                var predictedQ = Predict(exp.State);

                // Target equals the prediction everywhere except the taken
                // action, which receives the bootstrapped Q-learning target.
                var targetQ = predictedQ.ToArray();
                var maxNextQ = Predict(exp.NextState).Max();
                targetQ[exp.Action] = (float)(exp.Reward + discountFactor * maxNextQ);

                Backpropagate(exp.State, targetQ, learningRate);
            }
        }

        /// <summary>
        /// Simplified backpropagation: computes the output-layer error and
        /// walks the layers backwards, applying each layer's (approximate)
        /// weight update. A real implementation would also use activation
        /// derivatives and per-layer inputs; this is intentionally simplified.
        /// </summary>
        private void Backpropagate(float[] input, float[] target, double learningRate)
        {
            // Forward pass, saving each layer's output for the error computation.
            var outputs = new List<float[]>();
            var current = input;
            foreach (var layer in layers)
            {
                current = layer.Forward(current);
                outputs.Add(current);
            }

            // Output-layer error (target minus actual). Hoisted out of the
            // loop — the original re-evaluated outputs.Last() per element.
            var finalOutput = outputs[outputs.Count - 1];
            var errors = new float[finalOutput.Length];
            for (int i = 0; i < errors.Length; i++)
            {
                errors[i] = target[i] - finalOutput[i];
            }

            // Walk backwards: update this layer's weights, then propagate the
            // error to the previous layer (skip propagation past the first).
            for (int i = layers.Count - 1; i >= 0; i--)
            {
                layers[i].UpdateWeights(errors, (float)learningRate);
                if (i > 0)
                {
                    errors = layers[i].Backward(errors);
                }
            }
        }
    }
    
    /// <summary>
    /// A single fully-connected neural network layer with ReLU activation.
    /// Weights are stored in a flat array indexed as
    /// Weights[input * OutputSize + output]; biases start at zero.
    /// </summary>
    public class Layer
    {
        public float[] Weights { get; }
        public float[] Biases { get; }
        public int InputSize { get; }
        public int OutputSize { get; }

        /// <summary>
        /// Creates a layer mapping <paramref name="inputSize"/> features to
        /// <paramref name="outputSize"/> outputs. Weights start at zero;
        /// callers are expected to initialize them.
        /// </summary>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when either size is not positive.</exception>
        public Layer(int inputSize, int outputSize)
        {
            // Non-positive sizes would otherwise create empty/invalid arrays
            // and fail obscurely on first use.
            if (inputSize <= 0)
            {
                throw new ArgumentOutOfRangeException(nameof(inputSize), "Layer size must be positive.");
            }
            if (outputSize <= 0)
            {
                throw new ArgumentOutOfRangeException(nameof(outputSize), "Layer size must be positive.");
            }

            InputSize = inputSize;
            OutputSize = outputSize;
            Weights = new float[inputSize * outputSize];
            Biases = new float[outputSize];
        }

        /// <summary>
        /// Forward pass: output[i] = ReLU(Biases[i] + sum_j input[j] * W[j, i]).
        /// </summary>
        /// <param name="input">Activation vector; length must equal <see cref="InputSize"/>.</param>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="input"/> is null.</exception>
        /// <exception cref="ArgumentException">Thrown when the input length differs from <see cref="InputSize"/>.</exception>
        public float[] Forward(float[] input)
        {
            if (input is null)
            {
                throw new ArgumentNullException(nameof(input));
            }
            // The original silently ignored surplus elements and threw a raw
            // IndexOutOfRangeException on short input; fail fast instead.
            if (input.Length != InputSize)
            {
                throw new ArgumentException($"Expected {InputSize} inputs but got {input.Length}.", nameof(input));
            }

            var output = new float[OutputSize];
            for (int i = 0; i < OutputSize; i++)
            {
                output[i] = Biases[i];
                for (int j = 0; j < InputSize; j++)
                {
                    output[i] += input[j] * Weights[j * OutputSize + i];
                }
                output[i] = ReLU(output[i]);
            }

            return output;
        }

        /// <summary>
        /// Simplified weight update: every weight feeding output unit
        /// (index % OutputSize) is nudged by learningRate * error for that
        /// unit. A full implementation would also scale by the layer's input
        /// activation; this deliberately approximates that step.
        /// </summary>
        public void UpdateWeights(float[] errors, float learningRate)
        {
            for (int i = 0; i < Weights.Length; i++)
            {
                // Flat index i corresponds to output unit i % OutputSize.
                Weights[i] += learningRate * errors[i % OutputSize];
            }

            for (int i = 0; i < Biases.Length; i++)
            {
                Biases[i] += learningRate * errors[i];
            }
        }

        /// <summary>
        /// Propagates output errors back through the weights, producing the
        /// error attributed to each input unit. The ReLU derivative is not
        /// applied — part of the documented simplification.
        /// </summary>
        public float[] Backward(float[] errors)
        {
            var prevErrors = new float[InputSize];
            for (int i = 0; i < InputSize; i++)
            {
                for (int j = 0; j < OutputSize; j++)
                {
                    prevErrors[i] += errors[j] * Weights[i * OutputSize + j];
                }
            }
            return prevErrors;
        }

        /// <summary>Rectified linear unit: max(0, x).</summary>
        private float ReLU(float x) => x > 0 ? x : 0;
    }
}