#!/usr/bin/python3
# -*- coding: utf-8 -*-
# ------------------------------------------
# @Time    : Date - 2021/8/21   Time - 19:55
# @Author  : Spence Guo Tang
# @FileName: trainer.py
# ------------------------------------------

import time
import wandb
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms

from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

from models import Classifier, BaseModel
from data_process import load_mnist


class MyDataset(Dataset):
    """Thin Dataset wrapper pairing image samples with their labels."""

    def __init__(self, images, labels):
        self.images = images
        self.labels = labels

    def __getitem__(self, idx):
        # idx selects a single sample from the dataset.
        return self.images[idx], self.labels[idx]

    def __len__(self):
        # The number of samples equals the number of labels.
        return len(self.labels)


def collate_fn(batch):
    """Collate a list of (image, label) pairs into batched tensors.

    Sizes the output on the actual number of samples received, so the
    last batch of an epoch (possibly smaller than batch_size) is handled.

    Args:
        batch: list of (image_tensor, label_tensor) pairs produced by
            MyDataset; each image flattens to 28*28 values and each label
            is a one-element tensor holding a class index.

    Returns:
        (images, labels): images of shape (B, 1, 28, 28), and labels as a
        1-D long tensor of class indices — the target format expected by
        nn.CrossEntropyLoss in the training loop.
    """
    _batch_size = len(batch)

    _images = torch.cat([b[0] for b in batch], dim=0).view(_batch_size, 1, 28, 28)
    # FIX: class-index targets for CrossEntropyLoss must be 1-D and dtype
    # long; the old code produced a float (B, 1) tensor and additionally
    # called torch.tensor() on an existing tensor (copy warning).
    _labels = torch.cat([b[1] for b in batch], dim=0).view(-1).long()
    return _images, _labels


def func(label_tensor, class_count: int):
    """One-hot encode a column of class indices.

    Args:
        label_tensor: 2-D tensor of shape (N, 1) (or (N, k) with the class
            index in column 0) holding integer class ids in [0, class_count).
        class_count: number of classes C.

    Returns:
        Float tensor of shape (N, C) with a 1.0 at each sample's class
        index and 0.0 elsewhere.
    """
    target = torch.zeros((label_tensor.shape[0], class_count))
    # Vectorized replacement for the original per-row Python loop;
    # scatter_ requires int64 indices, hence .long().
    target.scatter_(1, label_tensor[:, :1].long(), 1.0)
    return target


def train(device_type: str):
    """Train BaseModel on MNIST, logging batch/epoch loss to wandb.

    Args:
        device_type: torch device string, e.g. "cpu" or "cuda:0".
    """
    train_images, train_labels = load_mnist(path="dataset", kind="train")
    test_images, test_labels = load_mnist(path="dataset", kind="t10k")

    train_set = MyDataset(train_images, train_labels)
    test_set = MyDataset(test_images, test_labels)

    # Initialize the wandb run ----------------------
    wandb.init(project="mnist-classification", entity="spenceguo")

    epochs = 10
    learning_rate = 0.01
    batch_size = 10

    # Track hyper-parameters in wandb --------------------
    config = wandb.config
    config.learning_rate = learning_rate
    config.epochs = epochs
    config.batch_size = batch_size

    # Dataset loaders (test_loader is built for a future evaluation pass).
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)

    # model = Classifier()
    # FIX: the model must live on the same device as the data; previously
    # only the tensors were moved, which crashes when device_type is CUDA.
    model = BaseModel().to(device_type)

    # Loss function and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop.
    model.train()
    for epoch in range(epochs):
        epoch_loss = 0.0
        num_batches = 0

        for images, labels in train_loader:
            num_batches += 1
            images = images.to(device_type)
            # FIX: CrossEntropyLoss expects 1-D long class indices; this
            # normalization is a no-op if the collate function already
            # produces that format.
            labels = labels.view(-1).long().to(device_type)

            optimizer.zero_grad()
            output = model(images)

            # Compute the loss, then backpropagate and update weights.
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

            wandb.log({"Training Batch Mean Loss": loss.item()})

            # Report the current batch loss every 100 batches.
            if num_batches % 100 == 0:
                print('epoch: {} ; batch: {}  loss: {}'.format(epoch, num_batches, loss.item()))

        # Guard against an empty loader to avoid ZeroDivisionError.
        wandb.log({"Training Epoch Mean Loss": epoch_loss / max(num_batches, 1)})

        # Save the trained model; name checkpoints per epoch to tell runs apart.
        # torch.save(model.state_dict(), "output/" + 'epoch' + str(epoch) + '.pt')


if __name__ == '__main__':
    # Prefer the first GPU when one is available; otherwise fall back to CPU.
    if torch.cuda.is_available():
        device = "cuda:0"
    else:
        device = "cpu"

    train(device)
