import random

import numpy as np


def generate_kernel(in_channels, out_channels, kernel_size):
    """Sample a convolution kernel stack from a normal distribution.

    Values are drawn from N(0, 1e-2), the usual small-scale init.

    :param in_channels: number of input channels
    :param out_channels: number of output channels
    :param kernel_size: spatial size of the (square) kernel
    :return: array of shape (out_channels, in_channels, kernel_size, kernel_size)
    """
    shape = (out_channels, in_channels, kernel_size, kernel_size)
    return np.random.normal(loc=0, scale=1e-2, size=shape)


def relu(x):
    """Element-wise ReLU activation: clamp negative values to zero.

    :param x: scalar or numpy array
    :return: max(x, 0), element-wise
    """
    return np.clip(x, 0, None)


def generate_W(in_features, out_features):
    """Sample a fully connected layer's weight matrix from N(0, 1e-2).

    :param in_features: number of input units
    :param out_features: number of output units
    :return: weight array of shape (in_features, out_features)
    """
    dims = (in_features, out_features)
    return np.random.normal(loc=0, scale=1e-2, size=dims)


def Conv(inputs, kernel, stride=1):
    """Single-channel, single-kernel 2-D "valid" convolution.

    No kernel flipping is performed (i.e. this is cross-correlation, as is
    conventional in CNN implementations).

    :param inputs: 2-D array (H, W), one channel
    :param kernel: 2-D square kernel (k, k); only shape[0] is read, so the
        kernel is assumed square
    :param stride: step between adjacent windows
    :return: 2-D array of shape ((H-k)//stride + 1, (W-k)//stride + 1)
    """
    in_H, in_W = inputs.shape
    kernel_size = kernel.shape[0]  # kernel assumed square
    out_H = (in_H - kernel_size) // stride + 1
    out_W = (in_W - kernel_size) // stride + 1
    ret = np.zeros([out_H, out_W])
    # Write each window's weighted sum straight into the output cell instead
    # of buffering values in a temporary list and copying them in a second
    # pass.  Iterating output coordinates also makes the old boundary check
    # unnecessary: r*stride + kernel_size <= in_H holds by construction of
    # out_H (and likewise for columns).
    for r in range(out_H):
        for c in range(out_W):
            top, left = r * stride, c * stride
            window = inputs[top:top + kernel_size, left:left + kernel_size]
            ret[r, c] = np.sum(window * kernel)
    return ret


class Conv2d:
    def __init__(self, in_channels, out_channels,
                 kernel_size, stride=1, padding=0):
        """2-D convolution layer built on the single-kernel Conv helper.

        :param in_channels: number of input channels
        :param out_channels: number of output channels; together with
            in_channels this fixes the weight shape
        :param kernel_size: spatial size of the square kernel
        :param stride: step between adjacent windows
        :param padding: zero-padding added to each spatial border
        """
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # The layer's weights are the kernels themselves.
        self.weight = generate_kernel(self.in_channels, self.out_channels,
                                      self.kernel_size)
        self.bias = 0

    def __call__(self, inputs):
        """Apply the convolution to a batch.

        :param inputs: (batch_size, in_channels, H, W)
        :return: convolved output (batch_size, out_channels, H_out, W_out)
        """
        batch_size, _, in_H, in_W = inputs.shape
        out_H = (in_H + 2 * self.padding - self.kernel_size) // self.stride + 1
        out_W = (in_W + 2 * self.padding - self.kernel_size) // self.stride + 1
        pad = (self.padding, self.padding)
        padded = np.pad(inputs, ((0, 0), (0, 0), pad, pad),
                        'constant', constant_values=(0, 0))
        ret = np.zeros([batch_size, self.out_channels, out_H, out_W])
        for sample in range(batch_size):
            for oc in range(self.out_channels):
                # Each output channel accumulates one single-channel
                # convolution per input channel.
                for ic in range(self.in_channels):
                    ret[sample, oc] += Conv(padded[sample][ic],
                                            self.weight[oc][ic],
                                            self.stride)
        return ret


class MaxPool2d:
    def __init__(self, kernel_size, stride=1):
        """2-D max pooling.

        :param kernel_size: spatial size of the square pooling window
        :param stride: step between adjacent windows
        """
        self.kernel_size = kernel_size
        self.stride = stride

    def __call__(self, inputs):
        """Pool each channel of each sample independently.

        :param inputs: (batch_size, channels, H, W)
        :return: pooled output (batch_size, channels, H_out, W_out)
        """
        batch_size, channels, in_H, in_W = inputs.shape
        out_H = (in_H - self.kernel_size) // self.stride + 1
        out_W = (in_W - self.kernel_size) // self.stride + 1
        ret = np.zeros([batch_size, channels, out_H, out_W])
        # Iterate output coordinates and write each window's max straight
        # into the result, instead of collecting all maxima into a flat
        # temporary list and copying them back in a second full pass.
        # r*stride + kernel_size <= in_H holds by construction of out_H,
        # so no explicit boundary check is needed (likewise for columns).
        for i in range(batch_size):
            for ch in range(channels):
                for r in range(out_H):
                    for c in range(out_W):
                        top = r * self.stride
                        left = c * self.stride
                        window = inputs[i, ch,
                                        top:top + self.kernel_size,
                                        left:left + self.kernel_size]
                        ret[i, ch, r, c] = window.max()
        return ret


class Linear:
    """Fully connected (affine) layer: y = x @ W + b."""

    def __init__(self, in_features, out_features):
        """
        :param in_features: number of input units
        :param out_features: number of output units; together these fix the
            weight shape
        """
        self.in_features = in_features
        self.out_features = out_features
        self.weight = generate_W(in_features, out_features)
        self.bias = 0

    def forward(self, inputs):
        """Affine transform of a (batch_size, in_features) batch.

        :param inputs: (batch_size, in_features) array
        :return: (batch_size, out_features) array
        """
        # Matrix product plus bias; equivalent to einsum('ij,jk->ik', ...).
        return inputs @ self.weight + self.bias

    def __call__(self, inputs):
        return self.forward(inputs)


class CrossEntropyLoss:
    """Cross-entropy loss over raw (unnormalised) class scores.

    Re-implements the essentials of torch.nn.CrossEntropyLoss: optional
    per-class weights and optional averaging over the batch.
    """

    def __init__(self, weight=None, size_average=True):
        """
        :param weight: optional per-class rescaling weights, indexable by
            class id (e.g. a list or 1-D array of length C)
        :param size_average: if True, average the loss over the batch;
            otherwise return the sum
        """
        self.weight = weight
        self.size_average = size_average

    def __call__(self, input, target):
        """Compute the loss; instances are callable like functions.

        :param input: (batch_size, C) raw scores, C = number of classes
        :param target: (batch_size,) integer class indices
        :return: scalar loss
        """
        batch_loss = 0.
        for i in range(input.shape[0]):
            # Max-subtracted log-sum-exp: mathematically identical to
            # -log(exp(s_t) / sum(exp(s))), but immune to exp() overflow
            # when scores are large.
            scores = input[i, :] - np.max(input[i, :])
            log_prob = scores[target[i]] - np.log(np.sum(np.exp(scores)))
            loss = -log_prob

            # Bug fix: `if self.weight:` raises "truth value is ambiguous"
            # when weight is a numpy array; compare against None instead.
            if self.weight is not None:
                loss = self.weight[target[i]] * loss

            # Accumulate over the batch.  (A leftover debug print of each
            # per-sample loss was removed here.)
            batch_loss += loss

        # Optionally average the total over the batch.
        if self.size_average:
            batch_loss /= input.shape[0]

        return batch_loss




