package layers

import (
	"lenet/network/activations"
	"math/rand"

	"gonum.org/v1/gonum/floats"
)

// convLayer is a 2D convolutional layer that performs a "valid" convolution
// (no padding, stride 1) of each kernel over the element-wise sum of the
// input channels. All matrices are stored row-major as flat []float64.
type convLayer struct {
	padding    uint //not implemented
	stride     uint //not implemented
	kernelSize uint // side length of each square kernel
	//countKernel uint

	countInput uint // number of input channels (feature maps)
	dimInput   uint // side length of each square input channel

	sumInput []float64   // element-wise sum of all input channels; length dimInput^2
	inputs   [][]float64 // inputs cached by Forward: [countInput][dimInput^2]
	outputs  [][]float64 // pre-activation conv results: [countKernel][(dimInput-kernelSize+1)^2]
	kernels  [][]float64 // learnable weights: [countKernel][kernelSize^2]

	activate activations.ActivationInterface // activation applied element-wise to the outputs
	lRate    float64                         // learning rate used by BackProp
} // flat storage is indexed as [x][y] -> x*rowLength + y

// NewConvLayer constructs a convLayer with countKernel square kernels of side
// kernelSize, each initialised with uniform random weights in [0, 1).
// padding and stride are stored but not implemented yet.
func NewConvLayer(countInput, dimInput, kernelSize, countKernel, padding, stride uint, lRate float64, activ activations.ActivationInterface) *convLayer {
	kernels := make([][]float64, countKernel)
	for i := range kernels {
		weights := make([]float64, kernelSize*kernelSize)
		for j := range weights {
			weights[j] = rand.Float64()
		}
		kernels[i] = weights
	}

	return &convLayer{
		padding:    padding,
		stride:     stride,
		kernelSize: kernelSize,
		//countKernel: countKernel,

		countInput: countInput,
		dimInput:   dimInput,
		kernels:    kernels,

		activate: activ,
		lRate:    lRate,
	}
}

// Forward runs a "valid" convolution (no padding, stride 1) of every kernel
// over the element-wise sum of the input channels and returns the activated
// result, flattened as [countKernel * (dimInput-kernelSize+1)^2].
//
// Side effects: caches the split inputs, their channel sum and the raw
// (pre-activation) outputs for later use by BackProp.
func (l *convLayer) Forward(inputs []float64) []float64 {
	l.inputs = ConvertTensor1to2(inputs, l.countInput)

	// Collapse all input channels into one dimInput x dimInput map.
	sum := make([]float64, l.dimInput*l.dimInput)
	for i := 0; i < int(l.countInput); i++ {
		floats.Add(sum, l.inputs[i])
	}
	l.sumInput = sum

	// Output side length of a valid convolution.
	resDim := int(l.dimInput - l.kernelSize + 1)

	// Pre-size both result slices: the output dimensions are known exactly,
	// so repeated append-growth copies are avoidable.
	resKernels := make([][]float64, 0, len(l.kernels))
	for _, k := range l.kernels { // one output map per kernel
		res := make([]float64, 0, resDim*resDim)
		for i := 0; i < resDim; i++ { // move the kernel down the rows
			xPos := i * int(l.dimInput)
			for j := 0; j < resDim; j++ { // move the kernel along the columns
				yPos := j
				dot := 0.
				for xOff := 0; xOff < int(l.kernelSize); xOff++ { // kernel row
					xOffset := xOff * int(l.dimInput)
					for yOffset := 0; yOffset < int(l.kernelSize); yOffset++ { // kernel column
						dot += l.sumInput[(yPos+yOffset)+(xPos+xOffset)] * k[yOffset+(xOff*int(l.kernelSize))]
					}
				}
				res = append(res, dot) // one output element per kernel position
			}
		}
		resKernels = append(resKernels, res)
	}

	l.outputs = resKernels
	return l.activationForSlice(ConvertTensor2to1(resKernels))
}

// BackProp propagates the error signal backwards through the layer,
// updating the kernel weights in place and returning the error for the
// previous layer, flattened as [countInput * dimInput^2].
//
// NOTE(review): several index expressions below look inconsistent with the
// Forward pass and should be verified before trusting training results:
//   - errsKernels[k] has (dimInput-kernelSize+1)^2 elements (one per output
//     position) but is reshaped into kernelSize rows;
//   - the incoming error is read as err2D[xOff][yOffset] (kernel offsets)
//     rather than at the output position (x, y);
//   - the activation derivative is taken at outputs[k][yOffset+xOffset],
//     which does not correspond to the (x, y) output element.
func (l *convLayer) BackProp(errs []float64) []float64 {
	// Accumulators for the error passed to the previous layer.
	newErrs := make([][]float64, l.countInput)
	sumErr := make([]float64, l.dimInput*l.dimInput)

	// One error map per kernel.
	errsKernels := ConvertTensor1to2(errs, uint(len(l.kernels)))

	for k := 0; k < len(l.kernels); k++ { // iterate over the kernels
		err2D := ConvertTensor1to2(errsKernels[k], l.kernelSize) // presumably rows of this kernel's error map — TODO confirm row count
		r := l.dimInput - l.kernelSize + 1                       // output side length of the valid convolution

		// Weight-gradient accumulator for this kernel.
		dWeights := make([][]float64, l.kernelSize)
		for i := range dWeights {
			dWeights[i] = make([]float64, l.kernelSize)
		}
		// This kernel's error contribution, at input resolution.
		newErr := make([][]float64, l.dimInput)
		for i := range newErr {
			newErr[i] = make([]float64, l.dimInput)
		}
		for x := 0; x < int(r); x++ { //position in the new-error matrix
			for y := 0; y < int(r); y++ { // position in the new-error matrix

				for xOff := 0; xOff < int(l.kernelSize); xOff++ { //move right within the kernel
					xOffset := xOff * int(l.kernelSize)

					for yOffset := 0; yOffset < int(l.kernelSize); yOffset++ { // move down within the kernel
						newErr[x+xOff][y+yOffset] += err2D[xOff][yOffset] * l.kernels[k][xOffset+yOffset] // add the error weighted by the kernel
						dWeights[xOff][yOffset] += l.lRate * err2D[xOff][yOffset] * l.activate.Derivate(l.outputs[k][yOffset+xOffset]) * l.sumInput[(x*int(l.dimInput)+xOffset)+y+yOffset]
					}
				}

			}
		}

		// Apply the accumulated (lRate-scaled) gradient to the weights.
		floats.Sub(l.kernels[k], ConvertTensor2to1(dWeights))

		// Accumulate this kernel's error into the shared total.
		floats.Add(sumErr, ConvertTensor2to1(newErr))

	}

	// Split the total error across the input channels, proportionally to
	// each channel's share of the summed input.
	for i := range newErrs {
		tmpErr := make([]float64, l.dimInput*l.dimInput)
		for j := range tmpErr {
			if l.sumInput[j] != 0 { // guard against division by zero for inactive positions
				tmpErr[j] = sumErr[j] * (l.inputs[i][j] / l.sumInput[j])
			}
		}
		newErrs[i] = tmpErr
	}

	return ConvertTensor2to1(newErrs)
}

// activationForSlice returns a new slice whose elements are the layer's
// activation function applied to each element of s; s itself is left intact.
func (l *convLayer) activationForSlice(s []float64) []float64 {
	out := make([]float64, len(s))
	for i, v := range s {
		out[i] = l.activate.Activate(v)
	}
	return out
}
