import numpy as np
import os
import time
import torch.cuda
import torch.backends.cudnn
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from tqdm import tqdm
from tensorboardX import SummaryWriter
from network.model_v3 import mobilenet_v3_large
from MyDataLoader.MyDataLoader import MyDataLoader
from utils import classDictWriter
from utils import makeDir
from utils import ConfigReader

# ------------------------------- load parameter -------------------------------
# device selection: prefer the first CUDA GPU, fall back to CPU
if torch.cuda.is_available():
    device = "cuda:0"
    # cudnn autotuner is safe to enable here because the network input
    # size/shape stays constant for the whole run
    torch.backends.cudnn.benchmark = True
else:
    device = "cpu"
print("\033[1;33m using {} device. \033[0m".format(device))
# read hyper-parameters and paths from config.yaml
CR = ConfigReader("config.yaml")
bz = CR.getElement("batch_size")
epochs = CR.getElement("epochs")
lr = CR.getElement("learning_rate")
nw = CR.getElement("num_workers")  # DataLoader worker processes
iuput_size = CR.getElement("input_size")  # NOTE(review): name is a typo of "input_size"; kept because later blocks reference it
num_classes = CR.getElement("num_classes")
dataset_path = CR.getElement("dataset_path")
train_path = os.path.join(dataset_path, "train")  # expects <dataset_path>/train and /val subfolders
val_path = os.path.join(dataset_path, "val")
pretrained_model_path = CR.getElement("pretrained_model_path")  # empty string disables transfer learning below
save_path = CR.getElement("save_path")
makeDir(save_path)  # ensure output dir exists -- presumably mkdir -p; verify in utils

# --------------------------------------- load dataset --------------------------------------
def _basic_transform():
    """Resize to the configured input size, convert to tensor, ImageNet-normalize."""
    return transforms.Compose([
        transforms.Resize([iuput_size, iuput_size]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])


# train and val use the same deterministic preprocessing pipeline
data_transform = {"train": _basic_transform(), "val": _basic_transform()}

# build the train / validation loaders (only the train split is shuffled)
train_loader = MyDataLoader(path=train_path, transform_=data_transform["train"],
                            batch_size_=bz, shuffle_=True,
                            num_workers_=nw, pin_memory_=True)
val_loader = MyDataLoader(path=val_path, transform_=data_transform["val"],
                          batch_size_=bz, shuffle_=False,
                          num_workers_=nw, pin_memory_=True)

# image counts for each split, used later for accuracy normalization
train_num = train_loader.size
val_num = val_loader.size
print(
    "\033[1;33m using {} images for training, {} images for validation.\033[0m".format(train_num,
                                                                                       val_num))

# persist the class-name -> index mapping, e.g. {'x':0, 'xx':1, 'xxx':2, ...}
class_dict = train_loader.class_dict
classDictWriter(path=save_path, class_dict=class_dict)

# ------------------------------------ network -------------------------------------
# num_classes comes from config; reduced_tail=True shrinks the channel counts
# of the last stages (C4/C5) for a lighter model
net = mobilenet_v3_large(num_classes=num_classes, reduced_tail=True)

# ------------------------------- tensorboard writer -------------------------------
writer = SummaryWriter(logdir=save_path, flush_secs=10)
# dummy NCHW batch used only to trace the graph for TensorBoard;
# torch.rand already yields a float32 tensor, so the original
# numpy -> from_numpy -> .type(FloatTensor) round-trip is unnecessary
graphInputs = torch.rand(1, 3, iuput_size, iuput_size)
writer.add_graph(net, graphInputs)

# ------------------------------- Transfer Learning -------------------------------
# BUG FIX: the original used `pretrained_model_path is not ""`, which compares
# object identity rather than value (and raises a SyntaxWarning on CPython 3.8+);
# `!=` is the correct value comparison.
if pretrained_model_path != "" and os.path.exists(pretrained_model_path):
    print('\033[1;40m find pretrained network, start transfer learning\033[0m')
    pre_weights = torch.load(pretrained_model_path, map_location=device)
    # The official MobileNet checkpoint loader matches parameters by NAME,
    # which fails when our layer names differ from the checkpoint's. Instead,
    # walk both state dicts in order and copy a pretrained tensor whenever the
    # SIZES match; a network entry with no same-sized counterpart at the
    # current checkpoint position is left at its initialized value.
    keys = list(pre_weights.keys())  # pretrained parameter names, in order
    j = len(keys)
    pre_dict = net.state_dict()
    i = 0  # index into the pretrained keys
    for k, v in pre_dict.items():
        # guard BEFORE indexing keys[i] -> safe even for an empty checkpoint
        # (the original indexed first and could raise IndexError)
        if i >= j:
            break
        if v.size() == pre_weights[keys[i]].size():
            pre_dict[k] = pre_weights[keys[i]]
            i += 1
    # pre_dict started from net.state_dict(), so every key matches and
    # strict loading is safe
    missing_keys, unexpected_keys = net.load_state_dict(pre_dict, strict=True)
    # freeze the feature extractor; only the classifier head stays trainable
    for param in net.features.parameters():
        param.requires_grad = False
else:
    print("\033[1;40m Transfer learning is canceled \033[0m")

net.to(device)  # move the model to the selected device

#  ------------------ define LossFunction and optimizer ------------------
# CrossEntropyLoss = LogSoftmax + NLLLoss in one module
loss_function = nn.CrossEntropyLoss()
# optimize only the parameters left trainable (frozen layers are excluded)
params = list(filter(lambda p: p.requires_grad, net.parameters()))
optimizer = optim.AdamW(params, lr=lr)


def _train_one_epoch(epoch, scaler):
    """Run one AMP training pass over train_loader; return the mean batch loss."""
    net.train()
    running_loss = 0.0
    train_bar = tqdm(train_loader)
    for step, data in enumerate(train_bar):
        images, labels = data
        optimizer.zero_grad(set_to_none=True)  # frees grad tensors instead of zero-filling

        # forward pass under autocast so eligible ops run in reduced precision
        with torch.cuda.amp.autocast():
            logits = net(images.to(device))
            train_loss = loss_function(logits, labels.to(device))
        # scale the loss and backprop scaled gradients
        scaler.scale(train_loss).backward()
        # unscale gradients and step (the step is skipped if grads overflowed)
        scaler.step(optimizer)
        # adjust the scale factor for the next iteration
        scaler.update()

        running_loss += train_loss.item()
        train_bar.desc = "train epoch[{}/{}] loss:{:.4f}".format(epoch + 1,
                                                                 epochs,
                                                                 train_loss)
    return running_loss / len(train_loader)


def _validate(epoch):
    """Evaluate net on val_loader; return top-1 accuracy over the whole split."""
    net.eval()
    acc = 0.0  # running count of correct predictions
    with torch.no_grad():
        val_bar = tqdm(val_loader)
        for val_data in val_bar:
            val_images, val_labels = val_data
            outputs = net(val_images.to(device))
            predict_y = torch.max(outputs, dim=1)[1]
            acc += torch.eq(predict_y, val_labels.to(device)).sum().item()
            val_bar.desc = "valid epoch[{}/{}]".format(epoch + 1,
                                                       epochs)
    return acc / val_num


def train():
    """Full training loop: AMP training, per-epoch validation, best-model
    checkpointing and a manual step learning-rate decay.

    Uses the module-level net / optimizer / loaders / writer. A checkpoint is
    written to save_path whenever validation accuracy reaches a new best.
    """
    best_acc = 0.0  # best validation accuracy seen so far; gates checkpointing
    start_time = time.time()
    scaler = torch.cuda.amp.GradScaler()  # automatic mixed precision (AMP)
    for epoch in range(epochs):
        running_loss = _train_one_epoch(epoch, scaler)
        val_accurate = _validate(epoch)

        writer.add_scalar("Loss", running_loss, epoch + 1)
        writer.add_scalar("ValAcc", val_accurate, epoch + 1)
        print(
            '\n \033[1;33m [epoch %d] train_loss: %.5f val_accuracy: %.3f \033[0m \n' %
            (epoch + 1, running_loss, val_accurate))

        # save a checkpoint on every new best validation accuracy
        if val_accurate > best_acc:
            best_acc = val_accurate
            # BUG FIX: the original concatenated save_path with the filename
            # via `+`, silently dropping the path separator when save_path
            # had no trailing slash; os.path.join is separator-safe
            model_save_path = os.path.join(
                save_path,
                "model-epoch{}_loss{:.4f}_acc{:.3f}.pth".format(
                    epoch + 1, running_loss, val_accurate))
            torch.save(net.state_dict(), model_save_path)

        # manual LR schedule: multiply lr by gamma=0.9 every step_size=5 epochs
        # NOTE(review): `epoch % 5 == 0` also fires at epoch 0, so even the
        # second epoch already trains with lr * 0.9 -- confirm this is intended
        lr_printing = None
        if epoch % 5 == 0:  # step_size
            for p in optimizer.param_groups:
                p['lr'] *= 0.9  # gamma
                lr_printing = p['lr']
            print('\033[1;34mlearning_rate: {:.5f}\033[0m'.format(lr_printing))

    end_time = time.time()
    cost = int(end_time - start_time)
    print('\033[1;32m Finished Training, cost time:\033[1;34m {}s \033[0m'.format(cost))


if __name__ == '__main__':
    train()  # entry point: run the full training / validation loop
