package network

import (
	"fmt"
	"gonum.org/v1/gonum/mat"
	"math"
)

// NeuralNetwork 神经网络模型
// NeuralNetwork is a feed-forward neural network model: an ordered
// sequence of layers that input flows through during a forward pass.
type NeuralNetwork struct {
	Layers []Layer // layers in forward-pass order; index 0 receives the raw input
}

// NewNeuralNetwork 创建一个新的神经网络
// NewNeuralNetwork returns an empty network, ready for layers to be
// added with AddLayer.
func NewNeuralNetwork() *NeuralNetwork {
	net := new(NeuralNetwork)
	net.Layers = []Layer{}
	return net
}

// AddLayer 向神经网络添加一个层
// AddLayer appends layer to the end of the network's forward-pass order.
func (nn *NeuralNetwork) AddLayer(layer Layer) {
	extended := append(nn.Layers, layer)
	nn.Layers = extended
}

// Forward 执行整个网络的前向传播
// Forward runs a full forward pass: the input is threaded through every
// layer in order, and the final layer's output is returned.
func (nn *NeuralNetwork) Forward(input interface{}) interface{} {
	activation := input
	for i := range nn.Layers {
		activation = nn.Layers[i].Forward(activation)
	}
	return activation
}

// TrainBatch 使用小批量梯度下降训练网络
func (nn *NeuralNetwork) TrainBatch(inputs []interface{}, targets []interface{}, learningRate float64) float64 {
	batchSize := len(inputs)
	totalLoss := 0.0

	for i := 0; i < batchSize; i++ {
		// 前向传播
		output := nn.Forward(inputs[i])

		// 计算损失
		loss := 0.0
		if vecOutput, ok := output.(*mat.VecDense); ok {
			vecTarget, ok := targets[i].(*mat.VecDense)
			if !ok {
				panic("Target is not a *mat.VecDense")
			}

			// 交叉熵损失
			for j := 0; j < vecOutput.Len(); j++ {
				outputVal := vecOutput.AtVec(j)
				if outputVal < 1e-10 {
					outputVal = 1e-10
				}
				loss -= vecTarget.AtVec(j) * math.Log(outputVal)
			}
		} else {
			panic("Unsupported output type for loss calculation")
		}

		totalLoss += loss

		// 反向传播
		var outputGradient interface{}
		if vecOutput, ok := output.(*mat.VecDense); ok {
			vecTarget, ok := targets[i].(*mat.VecDense)
			if !ok {
				panic("Target is not a *mat.VecDense")
			}

			// 交叉熵损失的梯度
			gradient := mat.NewVecDense(vecOutput.Len(), nil)
			gradient.SubVec(vecOutput, vecTarget)
			outputGradient = gradient
		} else {
			panic("Unsupported output type for gradient calculation")
		}

		// 从后向前执行反向传播
		for j := len(nn.Layers) - 1; j >= 0; j-- {
			outputGradient = nn.Layers[j].Backward(outputGradient, learningRate)
		}
	}

	return totalLoss / float64(batchSize)
}

// Train 使用小批量梯度下降训练网络
// Train runs mini-batch gradient descent for the given number of epochs,
// slicing the dataset into batches of batchSize (the final batch may be
// smaller) and printing the per-epoch average loss.
//
// It panics if the number of inputs and targets differ, or if batchSize
// is not positive (a non-positive batch size would leave the batch loop
// stuck forever, since the index never advances).
func (nn *NeuralNetwork) Train(inputs []interface{}, targets []interface{}, batchSize int, learningRate float64, epochs int) {
	numSamples := len(inputs)
	if numSamples != len(targets) {
		panic("Number of inputs does not match number of targets")
	}
	if batchSize <= 0 {
		panic("Batch size must be positive")
	}
	if numSamples == 0 {
		// Nothing to train on; also avoids a 0/0 (NaN) average loss below.
		return
	}

	for epoch := 0; epoch < epochs; epoch++ {
		totalLoss := 0.0

		// Walk the dataset one mini-batch at a time.
		for i := 0; i < numSamples; i += batchSize {
			end := i + batchSize
			if end > numSamples {
				end = numSamples
			}

			// Weight each batch's mean loss by its size so the epoch
			// average is exact even when the last batch is short.
			batchLoss := nn.TrainBatch(inputs[i:end], targets[i:end], learningRate)
			totalLoss += batchLoss * float64(end-i)
		}

		avgLoss := totalLoss / float64(numSamples)
		fmt.Printf("第 %d 轮训练 - 平均损失: %.4f\n", epoch+1, avgLoss)
	}
}

// Predict 预测样本的类别
func (nn *NeuralNetwork) Predict(input interface{}) int {
	output := nn.Forward(input)

	// 输出应该是一个向量
	vecOutput, ok := output.(*mat.VecDense)
	if !ok {
		panic("Output is not a *mat.VecDense")
	}

	// 找出概率最大的类别
	maxProb := vecOutput.AtVec(0)
	maxIdx := 0

	for i := 1; i < vecOutput.Len(); i++ {
		if vecOutput.AtVec(i) > maxProb {
			maxProb = vecOutput.AtVec(i)
			maxIdx = i
		}
	}

	return maxIdx
}

// Evaluate 评估模型在测试集上的准确率
func (nn *NeuralNetwork) Evaluate(inputs []interface{}, targets []interface{}) float64 {
	if len(inputs) != len(targets) {
		panic("Number of inputs does not match number of targets")
	}

	correct := 0
	for i := 0; i < len(inputs); i++ {
		pred := nn.Predict(inputs[i])

		// 找出目标one-hot向量中的1所在位置
		vecTarget, ok := targets[i].(*mat.VecDense)
		if !ok {
			panic("Target is not a *mat.VecDense")
		}

		actual := 0
		for j := 0; j < vecTarget.Len(); j++ {
			if vecTarget.AtVec(j) > 0.5 {
				actual = j
				break
			}
		}

		if pred == actual {
			correct++
		}
	}

	return float64(correct) / float64(len(inputs))
}

// SaveModel 保存模型参数到文件
func (nn *NeuralNetwork) SaveModel(filename string) error {
	// 实现模型保存逻辑
	return nil
}

// LoadModel 从文件加载模型参数
func (nn *NeuralNetwork) LoadModel(filename string) error {
	// 实现模型加载逻辑
	return nil
}
