package network

import (
	"math"
	"math/rand"
)

// Tensor3D is a dense rank-3 tensor used for convolutional layer
// inputs, outputs, and intermediate state.
type Tensor3D struct {
	Data   [][][]float64 // values indexed as [depth][height][width]
	Depth  int           // number of channels (first index)
	Height int           // spatial height (second index)
	Width  int           // spatial width (third index)
}

// NewTensor3D allocates a zero-filled 3D tensor with the given
// dimensions, laid out as [depth][height][width].
func NewTensor3D(depth, height, width int) *Tensor3D {
	data := make([][][]float64, depth)
	for d := range data {
		plane := make([][]float64, height)
		for h := range plane {
			plane[h] = make([]float64, width)
		}
		data[d] = plane
	}
	return &Tensor3D{Data: data, Depth: depth, Height: height, Width: width}
}

// NewTensor3DWithRandom allocates a 3D tensor whose entries are drawn
// from a zero-mean Gaussian scaled by sqrt(2/fanIn) (He initialization),
// where fanIn = depth*height*width.
func NewTensor3DWithRandom(depth, height, width int) *Tensor3D {
	t := NewTensor3D(depth, height, width)
	// He initialization keeps activation variance stable under ReLU.
	scale := math.Sqrt(2.0 / float64(depth*height*width))
	for _, plane := range t.Data {
		for _, row := range plane {
			for i := range row {
				row[i] = rand.NormFloat64() * scale
			}
		}
	}
	return t
}

// Clone returns a deep copy of the tensor; mutating the copy never
// affects the receiver (and vice versa).
func (t *Tensor3D) Clone() *Tensor3D {
	out := NewTensor3D(t.Depth, t.Height, t.Width)
	for d, plane := range t.Data {
		for h, row := range plane {
			copy(out.Data[d][h], row)
		}
	}
	return out
}

// Filters is the weight bank of a convolutional layer.
type Filters struct {
	Data         [][][][]float64 // weights indexed as [filter][input channel][height][width]
	FilterNum    int             // number of filters (output channels)
	FilterDepth  int             // input channels each filter spans
	FilterHeight int             // kernel height
	FilterWidth  int             // kernel width
}

// NewFilters allocates a zeroed filter bank with the given shape,
// indexed as [filter][input channel][height][width].
func NewFilters(filterNum, filterDepth, filterHeight, filterWidth int) *Filters {
	data := make([][][][]float64, filterNum)
	for f := range data {
		filter := make([][][]float64, filterDepth)
		for d := range filter {
			channel := make([][]float64, filterHeight)
			for h := range channel {
				channel[h] = make([]float64, filterWidth)
			}
			filter[d] = channel
		}
		data[f] = filter
	}

	return &Filters{
		Data:         data,
		FilterNum:    filterNum,
		FilterDepth:  filterDepth,
		FilterHeight: filterHeight,
		FilterWidth:  filterWidth,
	}
}

// InitializeFilters fills every filter weight with He-initialized
// Gaussian noise: stddev sqrt(2/fanIn), fanIn = depth*height*width
// of a single filter.
func InitializeFilters(filters *Filters) {
	fanIn := filters.FilterDepth * filters.FilterHeight * filters.FilterWidth
	scale := math.Sqrt(2.0 / float64(fanIn))

	for _, filter := range filters.Data {
		for _, channel := range filter {
			for _, row := range channel {
				for i := range row {
					row[i] = rand.NormFloat64() * scale
				}
			}
		}
	}
}

// ConvLayer implements a 2D convolutional layer with a scalar
// activation function applied element-wise to each output.
type ConvLayer struct {
	InputDepth           int
	InputHeight          int
	InputWidth           int
	FilterNum            int
	FilterDepth          int // always equals InputDepth (set by NewConvLayer)
	FilterHeight         int
	FilterWidth          int
	Stride               int
	Padding              int
	Filters              *Filters // learnable filter weights
	Biases               []float64
	OutputDepth          int // equals FilterNum
	OutputHeight         int
	OutputWidth          int
	Inputs               *Tensor3D // input saved by Forward for use in Backward
	Activation           func(float64) float64
	ActivationDerivative func(float64) float64
	PreActivations       *Tensor3D // pre-activation sums saved by Forward for Backward
}

// NewConvLayer constructs a convolutional layer with He-initialized
// filter weights, zero biases, and ReLU activation. The output spatial
// size follows the standard formula (in - k + 2p)/s + 1.
func NewConvLayer(inputDepth, inputHeight, inputWidth, filterNum, filterHeight, filterWidth, stride, padding int) *ConvLayer {
	// Each filter must span every input channel.
	filterDepth := inputDepth

	// Output spatial dimensions.
	outHeight := (inputHeight-filterHeight+2*padding)/stride + 1
	outWidth := (inputWidth-filterWidth+2*padding)/stride + 1

	// Learnable parameters: He-initialized weights, zero biases.
	weights := NewFilters(filterNum, filterDepth, filterHeight, filterWidth)
	InitializeFilters(weights)

	layer := &ConvLayer{
		InputDepth:           inputDepth,
		InputHeight:          inputHeight,
		InputWidth:           inputWidth,
		FilterNum:            filterNum,
		FilterDepth:          filterDepth,
		FilterHeight:         filterHeight,
		FilterWidth:          filterWidth,
		Stride:               stride,
		Padding:              padding,
		Filters:              weights,
		Biases:               make([]float64, filterNum),
		OutputDepth:          filterNum,
		OutputHeight:         outHeight,
		OutputWidth:          outWidth,
		Activation:           ReLUScalar,
		ActivationDerivative: ReLUDerivativeScalar,
	}
	return layer
}

// ReLUScalar is the rectified linear unit for a single value:
// it returns x when x is positive and 0 otherwise.
func ReLUScalar(x float64) float64 {
	result := 0.0
	if x > 0 {
		result = x
	}
	return result
}

// ReLUDerivativeScalar is the derivative of ReLU at x:
// 1 for positive x, 0 otherwise.
func ReLUDerivativeScalar(x float64) float64 {
	grad := 0.0
	if x > 0 {
		grad = 1
	}
	return grad
}

// pad returns a copy of input with `padding` rows/columns of zeros
// added on every spatial side. When padding is 0 the input itself is
// returned unchanged (no copy), so callers must treat the result as
// read-only in that case.
func pad(input *Tensor3D, padding int) *Tensor3D {
	if padding == 0 {
		return input
	}

	out := NewTensor3D(input.Depth, input.Height+2*padding, input.Width+2*padding)
	for d, plane := range input.Data {
		for h, row := range plane {
			// Shift each row down and right by `padding`; the border
			// stays at its zero value from NewTensor3D.
			copy(out.Data[d][h+padding][padding:], row)
		}
	}
	return out
}

// Forward runs the convolutional forward pass. The input must be a
// *Tensor3D of shape [InputDepth][InputHeight][InputWidth]; the return
// value is a *Tensor3D of shape [OutputDepth][OutputHeight][OutputWidth].
// It also stores a copy of the input and the pre-activation sums on the
// layer so Backward can use them.
func (c *ConvLayer) Forward(input interface{}) interface{} {
	inputTensor, ok := input.(*Tensor3D)
	if !ok {
		panic("ConvLayer.Forward: input is not a *Tensor3D")
	}

	// Save a copy of the input for the backward pass.
	c.Inputs = inputTensor.Clone()

	// Zero-pad the input spatially (returns the input itself when Padding == 0).
	paddedInput := pad(inputTensor, c.Padding)

	// Output tensor and pre-activation storage for backprop.
	output := NewTensor3D(c.OutputDepth, c.OutputHeight, c.OutputWidth)
	c.PreActivations = NewTensor3D(c.OutputDepth, c.OutputHeight, c.OutputWidth)

	// Convolve each filter over the padded input.
	for filterIdx := 0; filterIdx < c.FilterNum; filterIdx++ {
		// Slide the filter to every output position.
		for outY := 0; outY < c.OutputHeight; outY++ {
			for outX := 0; outX < c.OutputWidth; outX++ {
				// Top-left corner of the receptive field in padded coordinates.
				startY := outY * c.Stride
				startX := outX * c.Stride

				// Accumulate the weighted sum over the receptive field.
				sum := 0.0

				// Multiply-accumulate across all channels and kernel positions.
				for d := 0; d < c.FilterDepth; d++ {
					for fY := 0; fY < c.FilterHeight; fY++ {
						for fX := 0; fX < c.FilterWidth; fX++ {
							inputVal := paddedInput.Data[d][startY+fY][startX+fX]
							filterVal := c.Filters.Data[filterIdx][d][fY][fX]
							sum += inputVal * filterVal
						}
					}
				}

				// Add the per-filter bias.
				sum += c.Biases[filterIdx]

				// Save the pre-activation value for the backward pass.
				c.PreActivations.Data[filterIdx][outY][outX] = sum

				// Apply the element-wise activation function.
				output.Data[filterIdx][outY][outX] = c.Activation(sum)
			}
		}
	}

	return output
}

// Backward runs the convolutional backward pass. outputGradient must be
// a *Tensor3D of the layer's output shape. It computes the gradients of
// the filters, biases, and input, immediately applies an SGD update
// (param -= learningRate * grad) to the filters and biases, and returns
// the input gradient as a *Tensor3D of the layer's input shape.
// Requires that Forward was called first (uses c.Inputs and c.PreActivations).
func (c *ConvLayer) Backward(outputGradient interface{}, learningRate float64) interface{} {
	gradTensor, ok := outputGradient.(*Tensor3D)
	if !ok {
		panic("ConvLayer.Backward: outputGradient is not a *Tensor3D")
	}

	// Gradient with respect to the (unpadded) input.
	inputGrad := NewTensor3D(c.InputDepth, c.InputHeight, c.InputWidth)

	// Gradient accumulators for the filter weights.
	filterGrads := NewFilters(c.FilterNum, c.FilterDepth, c.FilterHeight, c.FilterWidth)

	// Gradient accumulators for the biases.
	biasGrads := make([]float64, c.FilterNum)

	// Re-pad the saved input so receptive fields line up as in Forward.
	paddedInput := pad(c.Inputs, c.Padding)

	// Accumulate gradients from every output position.
	for filterIdx := 0; filterIdx < c.FilterNum; filterIdx++ {
		for outY := 0; outY < c.OutputHeight; outY++ {
			for outX := 0; outX < c.OutputWidth; outX++ {
				// Chain the upstream gradient through the activation derivative.
				preActivation := c.PreActivations.Data[filterIdx][outY][outX]
				dActivation := c.ActivationDerivative(preActivation)
				chainGrad := gradTensor.Data[filterIdx][outY][outX] * dActivation

				// Bias gradient: sum of chained gradients over all positions.
				biasGrads[filterIdx] += chainGrad

				// Top-left corner of this receptive field in padded coordinates.
				startY := outY * c.Stride
				startX := outX * c.Stride

				for d := 0; d < c.FilterDepth; d++ {
					for fY := 0; fY < c.FilterHeight; fY++ {
						for fX := 0; fX < c.FilterWidth; fX++ {
							// Filter gradient = input value * chained output gradient.
							inputVal := paddedInput.Data[d][startY+fY][startX+fX]
							filterGrads.Data[filterIdx][d][fY][fX] += chainGrad * inputVal
						}
					}
				}

				// Input gradient (propagated to the previous layer).
				for d := 0; d < c.FilterDepth; d++ {
					for fY := 0; fY < c.FilterHeight; fY++ {
						for fX := 0; fX < c.FilterWidth; fX++ {
							// Map padded coordinates back to unpadded input
							// coordinates; positions that fall in the padding
							// border are skipped (their gradient is discarded).
							if startY+fY-c.Padding >= 0 &&
								startY+fY-c.Padding < c.InputHeight &&
								startX+fX-c.Padding >= 0 &&
								startX+fX-c.Padding < c.InputWidth {
								inputY := startY + fY - c.Padding
								inputX := startX + fX - c.Padding
								// Input gradient = filter weight * chained output gradient.
								inputGrad.Data[d][inputY][inputX] +=
									c.Filters.Data[filterIdx][d][fY][fX] * chainGrad
							}
						}
					}
				}
			}
		}
	}

	// SGD parameter update. Note: weight updates happen after all
	// gradients were accumulated, so the input gradient above was
	// computed with the pre-update weights.
	for filterIdx := 0; filterIdx < c.FilterNum; filterIdx++ {
		// Update bias.
		c.Biases[filterIdx] -= learningRate * biasGrads[filterIdx]

		// Update filter weights.
		for d := 0; d < c.FilterDepth; d++ {
			for fY := 0; fY < c.FilterHeight; fY++ {
				for fX := 0; fX < c.FilterWidth; fX++ {
					c.Filters.Data[filterIdx][d][fY][fX] -=
						learningRate * filterGrads.Data[filterIdx][d][fY][fX]
				}
			}
		}
	}

	return inputGrad
}

// GetType reports the kind of this layer ("Convolutional").
func (c *ConvLayer) GetType() string {
	const layerType = "Convolutional"
	return layerType
}

// GetOutputShape returns the output dimensions as [depth, height, width].
func (c *ConvLayer) GetOutputShape() []int {
	shape := make([]int, 0, 3)
	shape = append(shape, c.OutputDepth, c.OutputHeight, c.OutputWidth)
	return shape
}
