import time

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib import pyplot as plt

from data_loader import *

class MNIST_Adder(nn.Module):
    """Siamese-style CNN that predicts the sum (0-18) of two MNIST digits.

    Both 28x28 grayscale images pass through a *shared* convolutional
    encoder; the flattened features are concatenated and classified into
    the 19 possible sum classes.

    The head ends in LogSoftmax, so the matching training loss is
    ``nn.NLLLoss`` (``nn.CrossEntropyLoss`` would apply log-softmax a
    second time).
    """

    def __init__(self):
        super().__init__()
        # Shared encoder: 28x28 -conv5-> 24x24 -pool2-> 12x12 -conv3-> 10x10
        self.conv = nn.Sequential(
            nn.Conv2d(1, 10, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(10, 20, 3),
            nn.ReLU()
        )
        # Two images -> 2 * (20 channels * 10 * 10) = 4000 concatenated features.
        self.fc = nn.Sequential(
            nn.Linear(4000, 1024),
            nn.ReLU(),
            nn.Linear(1024, 19),
            # FIX: explicit dim=1 — the implicit-dim form is deprecated and
            # makes PyTorch guess the softmax dimension at runtime.
            nn.LogSoftmax(dim=1)
        )

    def forward(self, x1, x2):
        """Return log-probabilities of shape (batch, 19) over digit-sum classes.

        Args:
            x1: first batch of images, any shape reshapeable to (N, 1, 28, 28).
            x2: second batch of images, same batch size as ``x1``.
        """
        x1 = x1.view(-1, 1, 28, 28)  # (batch_size, channels, height, width)
        x2 = x2.view(-1, 1, 28, 28)  # (batch_size, channels, height, width)

        out1 = self.conv(x1)
        out1 = out1.view(out1.size(0), -1)  # flatten per sample

        out2 = self.conv(x2)
        out2 = out2.view(out2.size(0), -1)  # flatten per sample

        out = torch.cat([out1, out2], dim=-1)
        out = self.fc(out)
        return out


# Record wall-clock start time so the script can report its total runtime at the end.
start = time.time()


def train():
    """Train the MNIST digit-sum model and report per-epoch metrics.

    Loads two paired loaders per split via ``load_data_num_add`` (one image
    per loader; the target is the sum of the two labels), trains
    ``MNIST_Adder`` with AdamW for a fixed number of epochs, and prints the
    training loss and validation accuracy each epoch.

    Returns:
        list[float]: total training loss accumulated per epoch.
    """
    device = 'cuda' if torch.cuda.is_available() else "cpu"

    # Load the paired training/validation data loaders.
    train_loader1, train_loader2, validation_loader1, validation_loader2 = load_data_num_add()

    adder_model = MNIST_Adder().to(device)

    # BUG FIX: the model already ends in LogSoftmax, so the matching loss is
    # NLLLoss. The original CrossEntropyLoss applied log-softmax a second
    # time, distorting the loss surface and gradients.
    criterion = nn.NLLLoss()
    optimizer = optim.AdamW(adder_model.parameters(), lr=0.002, weight_decay=0.001)

    epochs = 10  # number of training epochs

    all_train_loss = []
    val_acc = []
    for epoch in range(epochs):
        # Put the model in training mode.
        adder_model.train()
        train_loss = 0.0
        for (x_y1, x_y2) in zip(train_loader1, train_loader2):
            x1, y1 = [i.to(device) for i in x_y1]
            x2, y2 = [i.to(device) for i in x_y2]
            y = y1 + y2  # target sum of the two digit labels, in [0, 18]
            outputs = adder_model(x1, x2)
            loss = criterion(outputs, y)
            train_loss += loss.item()
            # Backpropagation and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        all_train_loss.append(train_loss)
        # Print training progress (validation accuracy follows on the same line).
        print(f'迭代次数 {epoch+1}/{epochs}, 训练损失值: {train_loss:.4f}', end=' ')

        # Validation pass.
        total = 0
        correct = 0
        adder_model.eval()
        with torch.no_grad():  # no gradients / parameter updates during evaluation
            for (va_x_y1, va_x_y2) in zip(validation_loader1, validation_loader2):
                x1, y1 = [i.to(device) for i in va_x_y1]
                x2, y2 = [i.to(device) for i in va_x_y2]
                y = y1 + y2
                outputs = adder_model(x1, x2)
                predicted = torch.argmax(outputs, dim=1)
                total += y.size(0)
                # FIX: .item() keeps `correct` a plain int instead of a tensor.
                correct += (predicted == y).sum().item()

            validation_accuracy = 100 * correct / total
            print(f', 验证精度: {validation_accuracy:.2f}%')
            val_acc.append(validation_accuracy)

    return all_train_loss


# Kick off training when the script is executed.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so importing
# this module does not start training; the returned loss history is discarded here.
train()


# ---------------------- Data visualization (currently disabled) ---------------------------------

# plt.figure(figsize=(10, 5))
# epochs_range = range(epochs)
# plt.plot(epochs_range, all_train_loss, label='Training Loss')
# plt.title('Training  Loss')
#
# plt.figure(figsize=(10, 5))
# plt.plot(epochs_range, val_acc, label='Validation Accuracy')
# plt.title('Validation Accuracy')
#
#
# plt.figure('数据可视化', figsize=(16, 5))
# for key,value in images_dict.items():
#     # 维度缩减
#     npimg = np.squeeze(value[0].numpy())
#     plt.subplot(2, 10, key)
#     plt.imshow(npimg, cmap=plt.cm.binary)
#     plt.axis('off')
# plt.show()


# Report the total wall-clock runtime of the whole script.
end = time.time()
print('全程所花费时间为：%.2f秒' % (end - start))
