import math
import random
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.nn.functional as F
from ResNet50 import get_resnet50
import argparse
from tools import *

def load_data_fashion_mnist(batch_size, resize=None, num_workers=8):
    """Build train/test DataLoaders for the Fashion-MNIST dataset.

    Args:
        batch_size: number of samples per batch.
        resize: optional target image size; when truthy, a Resize transform
            is applied before ToTensor.
        num_workers: worker processes per DataLoader (default 8, the value
            previously hard-coded).

    Returns:
        (train_loader, test_loader): DataLoaders over the Fashion-MNIST
        train and test splits, downloaded to ./dataset if missing.
    """
    data_transform = [transforms.ToTensor()]
    if resize:
        data_transform.insert(0, transforms.Resize(resize))
    data_compose = transforms.Compose(data_transform)
    mnist_train = torchvision.datasets.FashionMNIST(
        root="./dataset", train=True, transform=data_compose, download=True
    )
    mnist_test = torchvision.datasets.FashionMNIST(
        root="./dataset", train=False, transform=data_compose, download=True
    )
    mnist_train_loader = DataLoader(
        dataset=mnist_train, batch_size=batch_size, shuffle=True,
        num_workers=num_workers, drop_last=False
    )
    # Evaluation accuracy does not depend on sample order, so shuffling the
    # test set was wasted work and made per-batch results non-reproducible.
    mnist_test_loader = DataLoader(
        dataset=mnist_test, batch_size=batch_size, shuffle=False,
        num_workers=num_workers, drop_last=False
    )
    return mnist_train_loader, mnist_test_loader

def load_data_cifar10(batch_size, resize=None, num_workers=8):
    """Build train/test DataLoaders for the CIFAR-10 dataset.

    Args:
        batch_size: number of samples per batch.
        resize: optional target image size; when truthy, a Resize transform
            is applied before ToTensor.
        num_workers: worker processes per DataLoader (default 8, the value
            previously hard-coded).

    Returns:
        (train_loader, test_loader): DataLoaders over the CIFAR-10 train
        and test splits, downloaded to ./dataset if missing.
    """
    data_transform = [transforms.ToTensor()]
    if resize:
        data_transform.insert(0, transforms.Resize(resize))
    data_compose = transforms.Compose(data_transform)
    cifar10_train = torchvision.datasets.CIFAR10(
        root="./dataset", train=True, transform=data_compose, download=True
    )
    cifar10_test = torchvision.datasets.CIFAR10(
        root="./dataset", train=False, transform=data_compose, download=True
    )
    train_loader = DataLoader(
        dataset=cifar10_train, batch_size=batch_size, shuffle=True,
        num_workers=num_workers, drop_last=False
    )
    # Evaluation accuracy does not depend on sample order, so shuffling the
    # test set was wasted work and made per-batch results non-reproducible.
    test_loader = DataLoader(
        dataset=cifar10_test, batch_size=batch_size, shuffle=False,
        num_workers=num_workers, drop_last=False
    )
    return train_loader, test_loader
    
def accuracy(y_hat, y):
    """Return the number of predictions in *y_hat* that match labels *y*.

    *y_hat* may be either a (batch, classes) score matrix or an already
    predicted label vector; *y* is the vector of true labels.
    """
    # A 2-D score matrix is reduced to predicted class indices first.
    if y_hat.ndim > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    matches = (y_hat.type(y.dtype) == y).type(y.dtype)
    # Total count of correct predictions as a plain float.
    return float(matches.sum())


def evaluate_accuracy_gpu(net, data_loader, device=None):
    """Compute the classification accuracy of *net* over *data_loader*.

    When *device* is not given it is inferred from the model's own
    parameters, so data is moved to wherever the model already lives.
    """
    if isinstance(net, nn.Module):
        net.eval()  # freeze dropout / batch-norm statistics for evaluation
        if not device:
            device = next(iter(net.parameters())).device
    # metric[0]: correct predictions, metric[1]: total samples seen
    metric = Accumulator(2)
    with torch.no_grad():
        for features, labels in data_loader:
            # Some loaders yield a list of tensors per batch; move each one.
            if isinstance(features, list):
                features = [part.to(device) for part in features]
            else:
                features = features.to(device)
            labels = labels.to(device)
            metric.add(accuracy(net(features), labels), labels.numel())
    return metric[0] / metric[1]


def train_ch6(net, train_loader, test_loader, num_epochs, lr, device,
              save_path='resnet50_fashion.pth'):
    """Train *net* with SGD + cross-entropy on *device* and save the weights.

    Re-initializes the model's Linear/Conv2d weights (Xavier uniform),
    plots train loss / train acc / test acc with Animator, prints a final
    summary and throughput, and saves the trained state_dict.

    Args:
        net: nn.Module to train; its weights are re-initialized in place.
        train_loader: DataLoader yielding (X, y) training batches.
        test_loader: DataLoader used for per-epoch accuracy evaluation.
        num_epochs: number of passes over the training set.
        lr: SGD learning rate.
        device: torch.device to train on.
        save_path: file the trained state_dict is written to; the default
            keeps the previously hard-coded filename for compatibility.
    """
    def init_weights(m):
        # Xavier init for conv/linear layers; other layers keep defaults.
        if isinstance(m, (nn.Linear, nn.Conv2d)):
            nn.init.xavier_uniform_(m.weight)
    net.apply(init_weights)
    print("Train on is ", device)
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    loss = nn.CrossEntropyLoss()
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], legend=[
                        'train loss', 'train acc', 'test acc'])
    timer, num_batches = Timer(), len(train_loader)
    # Plot roughly 5 times per epoch; max(1, ...) prevents a
    # ZeroDivisionError when the loader has fewer than 5 batches.
    plot_interval = max(1, num_batches // 5)
    for epoch in range(num_epochs):
        # metric: sum of loss, sum of correct predictions, sample count
        metric = Accumulator(3)
        net.train()
        for i, (X, y) in enumerate(train_loader):
            timer.start()
            # Clear gradients accumulated by the previous step.
            optimizer.zero_grad()
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
            with torch.no_grad():
                # CrossEntropyLoss is a per-sample mean; multiply by the
                # batch size so metric[0]/metric[2] is the true mean loss.
                metric.add(l * X.shape[0], accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % plot_interval == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_loader)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
          f'test acc {test_acc:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(device)}')
    # Persist the trained weights.
    torch.save(net.state_dict(), save_path)


if __name__ == "__main__":
    # Command-line hyper-parameters for training ResNet-50 on CIFAR-10.
    parser = argparse.ArgumentParser()
    parser.add_argument('-nums_class', "--nums_class", type=int, default=10)
    parser.add_argument('-num_epochs', "--num_epochs", type=int, default=10)
    parser.add_argument('-batch_size', "--batch_size", type=int, default=128)
    parser.add_argument('-resize', "--resize", type=int, default=224)
    parser.add_argument('-lr', "--lr", type=float, default=0.01)
    my_args = parser.parse_args()
    # Reuse the helper defined above instead of duplicating the dataset
    # and DataLoader construction inline (the previous copy drifted from
    # the helper only in argument ordering).
    train_loader, test_loader = load_data_cifar10(
        my_args.batch_size, resize=my_args.resize
    )
    resnet50 = get_resnet50(num_classes=my_args.nums_class)
    train_ch6(resnet50, train_loader, test_loader,
              my_args.num_epochs, my_args.lr, device=try_gpu())