import numpy as np
import random
import os
import pickle
import nn
import cv2
# Data iterator for mini-batch training
def data_iter(batch_size, features, labels, shuffle=True):
    """Yield (features, labels) mini-batches.

    `features` and `labels` must support fancy indexing with a list of
    ints (e.g. numpy arrays).  The final batch may be smaller than
    `batch_size`.
    """
    sample_count = len(features)
    order = list(range(sample_count))
    if shuffle:
        random.shuffle(order)
    start = 0
    while start < sample_count:
        chosen = order[start:start + batch_size]
        yield features[chosen], labels[chosen]
        start += batch_size

# Cross-entropy loss function
def criterion(outputs, labels):
    """Cross-entropy loss: -(labels * log(outputs)), summed over all elements.

    `outputs` must expose .log() / element-wise * / .sum() (the project's
    tensor type); `labels` is typically one-hot encoded.
    """
    log_probs = outputs.log()
    weighted = labels * log_probs
    return -weighted.sum()

# Model saving
def save_model(model, path):
    """Serialize a model's structure and parameters to `path` with pickle.

    Args:
        model: object exposing get_modules() and get_params().
        path: destination file path; parent directories are created on demand.
    """
    # Create the parent directory if needed.  exist_ok=True avoids the
    # check-then-create race of the original os.path.exists() guard.
    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)

    # Payload: module structure plus the flat parameter list.
    save_data = {
        'module': model.get_modules(),
        'params': model.get_params(),  # model parameters
    }

    # Write to file
    with open(path, 'wb') as f:
        pickle.dump(save_data, f)
    print(f"模型已保存至 {path}")

# Model loading
def load_model(path):
    """Rebuild a model from a pickle file produced by save_model.

    NOTE(security): pickle.load executes arbitrary code on malicious
    input -- only load model files from trusted sources.
    """
    # Load the serialized payload from disk
    with open(path, 'rb') as f:
        save_data = pickle.load(f)
    class My_Model(nn.nn.Module):
        def __init__(self):
            super().__init__()
            index = 0
            # class labels for the final argmax lookup (MNIST digits)
            self.classes = [0,1,2,3,4,5,6,7,8,9]
            for name, module in save_data['module'].items():
                setattr(self, name, module)
                # Re-attach the saved parameters to each learnable layer
                it = getattr(self,name)
                if isinstance(it, nn.nn.conv2d) or isinstance(it, nn.nn.linear):
                    if it.bias is True:
                        # Layer has a bias: consume two consecutive entries (weight, bias)
                        it.set_params([save_data['params'][index],save_data['params'][index+1]])
                        index += 2
                    else:
                        # Weight only
                        it.set_params(save_data['params'][index])
                        index += 1
        def forward(self, x):
            # for name, module in save_data['module'].items():
            #     if name == 'fc1':
            #         x = x.reshape((x.data.shape[0], -1))
            #     model = getattr(self, name)
            #     x = model(x)
            # Fixed LeNet-style topology; assumes the saved model defines
            # conv1/conv2, pool1/pool2, relu and fc1-fc3 -- TODO confirm
            # against the training script that produced the checkpoint.
            x = self.relu(self.conv1(x))
            x = self.pool1(x)
            x = self.relu(self.conv2(x))
            x = self.pool2(x)
            # Flatten to (batch, features) before the fully connected layers
            x = x.reshape((x.data.shape[0], -1))
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            x = self.fc3(x)

            # NOTE(review): np.argmax over the whole batch output -- correct
            # only for batch size 1; verify callers pass a single sample.
            return self.classes[np.argmax(x.data)]
            # return x
    ans = My_Model()
    return ans

# Image preprocessing
def read_img(img):
    """Load an image file and convert it to a normalized 28x28 MNIST-style array.

    The image is converted to grayscale, resized with preserved aspect
    ratio, zero-padded to 28x28 (centered), scaled to [0, 1] and
    standardized with the MNIST mean/std.

    Args:
        img: path to the image file.

    Returns:
        np.ndarray of shape (1, 1, 28, 28), dtype float32.

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    image = cv2.imread(img)
    if image is None:
        # cv2.imread returns None instead of raising on a bad path
        raise FileNotFoundError(f"cannot read image: {img}")

    # Convert straight to grayscale (equivalent result to the original
    # BGR->RGB->GRAY two-step chain)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Original image size
    h, w = image.shape[:2]

    t_w = 28
    t_h = 28

    # Scale so the longer side fits within the 28x28 target
    scale = min(t_w / w, t_h / h)
    new_w = int(w * scale)
    new_h = int(h * scale)

    # Resize, then place on a black canvas, centered
    resize = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_AREA)
    padding = np.zeros((t_h, t_w), dtype=np.float32)
    x = (t_w - new_w) // 2
    y = (t_h - new_h) // 2
    padding[y:y + new_h, x:x + new_w] = resize

    # MNIST normalization constants
    mean = 0.1307
    std = 0.3081
    padding = padding / 255.0
    padding = (padding - mean) / std

    # BUG FIX: np.reshape's return value was previously discarded, so the
    # function returned a (28, 28) array; reshape to (N, C, H, W) as intended.
    return padding.reshape(1, 1, t_h, t_w)

# Utility class with optimizer and tensor operations
class Way:

    # 随机梯度下降方法类
    class SGD:

        def __init__(self,params,lr,clip_value=1.0):
            self.params = params #参数
            self.lr = lr #学习率
            self.clip_value = clip_value #梯度裁剪

        # 梯度清零
        def zero_grad(self):
            for param in self.params:
                param.grad = 0

        # 梯度更新
        def step(self):
            for param in self.params:
                if param.grad is not None:
                    # 梯度裁剪
                    param.grad = np.clip(param.grad, -self.clip_value, self.clip_value)
                    param.data -= self.lr * param.grad

    # 卷积运算
    @staticmethod
    def conv2d(w_k,x, out_channels=1, in_channels=1,padding=0,stride=1):
        temp_data = np.copy(w_k)
        temp_x = np.copy(x)
        # 判断并修改维度，可以固定维度（现实中必须是四个维度，可以不需要）
        if len(temp_data.shape) == 2:
            temp_data = temp_data.reshape(out_channels, in_channels, temp_data.shape[0], temp_data.shape[1])
        elif len(temp_data.shape) == 3:
            temp_data = temp_data.reshape(out_channels, in_channels, temp_data.shape[1], temp_data.shape[2])
        if len(temp_x.shape) == 2:
            temp_x = temp_x.reshape(-1,in_channels, temp_x.shape[0], temp_x.shape[1])
        elif len(temp_x.shape) == 3:
            temp_x = temp_x.reshape(-1,in_channels, temp_x.shape[1], temp_x.shape[2])

        # 结果
        temp_ans = list()

        # 提取相应数据
        h, w = temp_x.shape[2:4]

        k_h, k_w = temp_data.shape[2:4]

        batch_size = temp_x.shape[0]

        # 提取步幅
        if isinstance(stride,int):
            s_h,s_w = stride,stride
        else:
            s_h,s_w = stride
        h = (h - k_h) // s_h + 1
        w = (w - k_w) // s_w + 1

        # 循环求卷积
        for batch in range(batch_size):
            for one in range(out_channels):
                for three in range(h):
                    for four in range(w):
                        ans = temp_x[batch,:,three*s_h:three*s_h + k_h, four*s_w:four*s_w + k_w] * temp_data[one, :, :, :]
                        temp_ans.append(ans.sum())

        # 改变ans维度
        temp_ans = np.array(temp_ans)
        temp_ans = temp_ans.reshape(batch_size,out_channels, h, w)

        # 回收，防止占用内存
        temp_data = None
        temp_x = None
        return temp_ans

    #池化运算
    @staticmethod
    def pool2d(k_h,k_w,x,padding=0,stride=None,mode='max'):
        temp_x = np.copy(x)
        # 判断并修改维度，可以固定维度
        if len(temp_x.shape) == 2:
            temp_x = temp_x.reshape(1,1, temp_x.shape[0], temp_x.shape[1])
        elif len(temp_x.shape) == 3:
            temp_x = temp_x.reshape(-1, temp_x.shape[0] , temp_x.shape[1], temp_x.shape[2])
        temp_ans = list()
        h, w = temp_x.shape[2:4]
        batch_size = temp_x.shape[0]
        # 提取步幅
        if stride is None:
            s_h, s_w = k_h,k_w
        else:
            if isinstance(stride, int):
                s_h, s_w = stride, stride
            else:
                s_h, s_w = stride
        h = int((h - k_h) // s_h + 1)
        w = int((w - k_w) // s_w + 1)
        # 循环求池化
        for batch in range(batch_size):
            for one in range(temp_x.shape[1]):
                for three in range(h):
                    for four in range(w):
                        if mode=='max':
                            ans = np.max(temp_x[batch, one, three*s_h:three*s_h + k_h, four*s_w:four*s_w + k_w])
                        else:
                            ans = np.mean(temp_x[batch, one, three*s_h:three*s_h + k_h, four*s_w:four*s_w + k_w])
                        temp_ans.append(ans)
        temp_ans = np.array(temp_ans)
        temp_ans = temp_ans.reshape(temp_x.shape[0], temp_x.shape[1], h, w)
        return temp_ans

