import time

import torch
from torch.utils.data import DataLoader

import torchvision.transforms as trans

import dataloader as dlr

import albumentations as A
from albumentations.pytorch import ToTensorV2

import matplotlib.pyplot as plt

from networks import Model
from options import opt
from logger import logger

from CIFAR import CIFAR

from eval import evaluate, dict2ftlb

# Dataset initialisation: CIFAR-100 splits read from list files; every image
# is converted to a tensor and resized to 224x224 for the network input.
def _make_cifar_split(list_file):
    # All splits share the same folder/name config and differ only in the list file.
    return CIFAR(
        {
            'folder'    : '../CIFAR/CIFAR100',
            'name'      : 'total',
            'list_file' : list_file
        },
        transforms=trans.Compose([
            trans.ToTensor(),
            trans.Resize(224)
        ])
    )

# Training split uses the imbalanced list; validation uses the full test list.
train_dataset = _make_cifar_split('exp_imb_2.txt')
val_dataset = _make_cifar_split('total_test_list.txt')

# Re-sampling: training batches are drawn by a random batch sampler over the
# coarse labels. `weights` stays None here, i.e. uniform sampling; set it to
# per-class weights to re-balance the draw.
weights = None
RSampler = dlr.RandBatchSampler(train_dataset.contents['coarse_labels'],
                                opt.batch,
                                weights=weights)

# Training batches come from the custom sampler, so no batch_size/shuffle is
# given here; validation iterates sequentially with a fixed batch size.
# NOTE(review): RSampler is passed as `sampler` (batch_size defaults to 1) —
# confirm RandBatchSampler yields single indices, not index lists; if it is a
# batch sampler it should go to `batch_sampler=` instead.
train_loader = DataLoader(train_dataset,
                          sampler=RSampler,
                          num_workers=opt.workers)
val_loader = DataLoader(val_dataset,
                        batch_size=opt.batch,
                        num_workers=opt.workers,
                        shuffle=False)

# Create the run log file and the model, and put the classifier head in
# training mode.
log_file = logger(opt)
model = Model(opt)
model.classifier.train()

# Optionally load a pretrained checkpoint; the epoch counter resumes from the
# loaded epoch only when opt.resume is set, otherwise training restarts at 0.
start_epoch = 0
if opt.load:
    load_epoch = model.load(opt.load)
    if opt.resume:
        start_epoch = load_epoch + 1

# ---------------------------------------------------------------------------
# Main training loop: runs the remaining epochs [start_epoch, opt.epoch).
# Per batch: forward + update, optional console message / log record / eval.
# Per epoch: optional console summary / log record / eval / checkpoint save,
# then a learning-rate step.
# ---------------------------------------------------------------------------
for epoch_num in range(opt.epoch - start_epoch):
    cur_epoch = start_epoch + epoch_num
    epoch_start = time.time()
    for batch, data in enumerate(train_loader):

        batch_start = time.time()
        # Training targets are the fine-grained labels; move batch to device.
        features, labels = data['data'], data['fine_labels']
        features = features.to(device=opt.device)
        labels = labels.to(device=opt.device)

        output = model.forward(features)
        loss = model.update(output, labels)

        # Periodic console progress message.
        if opt.msg_batch:
            if (batch + 1) % opt.msg_batch_freq == 0:
                print(
f'batch : {batch}, \
lr : {model.scheduler.get_last_lr() if model.scheduler is not None else opt.lr}, \
loss : {loss.item() : .4f}, avg_loss : {model.avg_loss : .4f}, \
time : {time.time() - batch_start : .2f}s, \
rate : {batch / (time.time() - epoch_start): .3f}b/s'
                    )

        # Periodic per-batch record in the log file.
        if opt.log_batch_freq > 0 and (batch + 1) % opt.log_batch_freq == 0:
            log_file.append({
                'batch' : batch,
                'lr' : (model.scheduler.get_last_lr() if model.scheduler is not None else opt.lr),
                'loss' : f'{loss.item() : .4f}',
                'avg_loss' : f'{model.avg_loss : .4f}',
            })

        # Periodic mid-epoch evaluation on the validation set.
        # NOTE(review): evaluate() likely switches the classifier to eval
        # mode — confirm it restores train mode before training continues.
        if opt.eval_batch_freq > 0 and (batch + 1) % opt.eval_batch_freq == 0:
            rec, rec_cls = evaluate(model.classifier, val_loader, opt.classes, dict2ftlb)
            print(
f'eval : batch, \
rec : {rec : .4f}'
            )
            log_file.append({
                'eval' : 'batch',
                'rec' : rec,
                'rec_cls' : str(rec_cls),
            })

    # End-of-epoch console summary.
    if opt.msg_epoch:
        print(
f'epoch : {cur_epoch}, \
lr : {model.scheduler.get_last_lr() if model.scheduler is not None else opt.lr}, \
avg_loss : {model.avg_loss : .4f}, \
time : {time.time() - epoch_start : .2f}s'
            )

    # End-of-epoch record in the log file.
    # BUG FIX: the original gated this on the *batch* counter
    # ((batch + 1) % opt.log_epoch_freq), so whether an epoch was logged
    # depended on how many batches the loader happened to yield. Use the
    # epoch counter, consistent with the eval and save conditions below.
    if opt.log_epoch_freq > 0 and (cur_epoch + 1) % opt.log_epoch_freq == 0:
        log_file.append({
            'epoch' : cur_epoch,
            'lr' : (model.scheduler.get_last_lr() if model.scheduler is not None else opt.lr),
            'avg_loss' : f'{model.avg_loss : .4f}',
        })

    # End-of-epoch evaluation on the validation set.
    if opt.eval_epoch_freq > 0 and (cur_epoch + 1) % opt.eval_epoch_freq == 0:
        rec, rec_cls = evaluate(model.classifier, val_loader, opt.classes, dict2ftlb)
        print(
f'eval : epoch, \
rec : {rec : .4f}'
        )
        log_file.append({
            'eval' : 'epoch',
            'rec' : rec,
            'rec_cls' : str(rec_cls),
        })

    # Checkpoint every opt.save_freq epochs and always at the final epoch.
    if opt.save_dir is not None and ((cur_epoch+1) % opt.save_freq == 0 or (cur_epoch+1) == opt.epoch):
        model.save(cur_epoch)

    # Step the learning-rate schedule once per epoch.
    model.lr_update()
    
