import os
import json
import pandas as pd
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.optim import Adam, AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
from sklearn.model_selection import KFold, ShuffleSplit, StratifiedShuffleSplit
from cutmix.cutmix import CutMix
from cutmix.utils import CutMixCrossEntropyLoss
from utils.model import *
from utils.metrics import *
from utils.dataset import TrainDataset
from utils import draw
from utils.label_smooth import LabelSmoothSoftmaxCE
# Augmentation
import albumentations as A
from albumentations import pytorch as AT
import warnings

warnings.filterwarnings("ignore")

# ---- Runtime / hyper-parameter configuration -------------------------------
# Prefer the first CUDA GPU, fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    # BUGFIX: only query GPU properties when a GPU actually exists;
    # get_device_properties(0) raises on a CPU-only machine.
    cuda_info = torch.cuda.get_device_properties(0)
    print("using {} {} {}MB.".format(device, cuda_info.name, cuda_info.total_memory / 1024 ** 2))
else:
    print("using {}.".format(device))

# Set fixed random number seed for reproducibility.
SEED = 233
torch.manual_seed(SEED)
train_size = 0.88  # fraction of samples used for training in the stratified split

batch_size = 64
epochs = 25
cutmix = 0         # CutMix probability in [0, 1]; 0 disables CutMix entirely
accumulation = 1   # gradient-accumulation steps per optimizer step
CosAnneal = 5      # CosineAnnealingLR T_max; falsy switches to warm restarts
CosT0 = 5          # CosineAnnealingWarmRestarts T_0
CosTMult = 2       # CosineAnnealingWarmRestarts T_mult
lr = 3e-4
weight_decay = 1e-3
input_size = 224   # square network input resolution
dataset_path = "../Dataset"
# Backbone name passed to timm; alternatives kept for quick switching.
model_name = 'resnest50d'
# model_name = 'seresnext50_32x4d'
# model_name = 'resnet50d'
# model_name = 'tf_efficientnetv2_m_in21ft1k'  # fc --> classify

# Checkpoints are saved under a directory named after the model.
save_dir = model_name + os.sep  # timm model
os.makedirs(save_dir, exist_ok=True)  # idempotent; safe on re-runs

# Shared preprocessing constants for both pipelines.
_scaled_size = int(input_size * (256 / 224))   # pre-crop resize edge
_norm_mean = (0.638, 0.568, 0.570)             # dataset channel means
_norm_std = (0.245, 0.255, 0.255)              # dataset channel stds

albu_transform = {
    # Training pipeline: random crop plus heavy geometric/photometric
    # augmentation, then normalization and tensor conversion.
    'train': A.Compose([
        A.Resize(_scaled_size, _scaled_size),
        A.RandomCrop(input_size, input_size),
        # Apply two randomly chosen flip/rotate transforms.
        A.SomeOf([
            A.RandomRotate90(),
            A.HorizontalFlip(),
            A.VerticalFlip(),
            A.Flip(),
        ], 2),
        A.ShiftScaleRotate(),
        # Occasionally blur the image.
        A.OneOf([
            A.GaussianBlur(blur_limit=(3, 5)),
            A.MedianBlur(blur_limit=3),
        ], p=0.3),
        # Apply two randomly chosen color-space perturbations.
        A.SomeOf([
            A.RandomBrightnessContrast(),
            A.HueSaturationValue(),
            A.RGBShift(),
            A.ChannelShuffle(),
        ], 2),
        # Drop rectangular or grid regions as regularization.
        A.OneOf([
            A.CoarseDropout(),
            A.GridDropout(),
        ]),
        A.Normalize(mean=_norm_mean, std=_norm_std),
        AT.ToTensorV2(p=1.0),  # also converts HWC -> CHW
    ]),
    # Validation pipeline: deterministic resize + center crop only.
    'val': A.Compose([
        A.Resize(_scaled_size, _scaled_size),
        A.CenterCrop(input_size, input_size),
        A.Normalize(mean=_norm_mean, std=_norm_std),
        AT.ToTensorV2(p=1.0),  # also converts HWC -> CHW
    ]),
}

# ---- Label mapping & dataset construction ----------------------------------
csv_path = os.path.join(dataset_path, 'train_clean.csv')
file_path = os.path.join(dataset_path, 'trainval')
data_info = pd.read_csv(csv_path)

# Deterministic class ordering: sorted unique label names -> contiguous ids.
labels = sorted(set(data_info['label']))
nc = len(labels)
class_to_num = dict(zip(labels, range(nc)))
num_to_class = {v: k for k, v in class_to_num.items()}

# Persist the id -> class-name mapping for later inference.
# NOTE: JSON serialization turns the integer keys into strings,
# e.g. {'0': ..., '1': ...}.
with open('class_indices.json', 'w') as json_file:
    json.dump(num_to_class, json_file, indent=4)

# Two dataset views over the same files: one with train-time augmentation,
# one with deterministic val-time preprocessing; samplers pick the split.
train_dataset = TrainDataset(csv_path, file_path, class_to_num, albu_transform['train'])
val_dataset = TrainDataset(csv_path, file_path, class_to_num, albu_transform['val'])
print("total {} images in trainval.".format(len(train_dataset)))

# Dataloader workers: capped by CPU count, batch size and a hard limit of 8.
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
# nw = 0
print('Using {} dataloader workers every process'.format(nw))
print('Using model {}, data-path={}, save-dir={}'.format(model_name, dataset_path, save_dir))
print('train_ratio={}, nc={}, epoch={}, batch-size={}, init-lr={}, input-size={}'.format(train_size, nc, epochs,
                                                                                         batch_size, lr, input_size))

# ---- Loss functions, split strategy and AMP scaler -------------------------
# Train loss: CutMix produces mixed soft labels and needs its dedicated
# cross-entropy; otherwise use label-smoothed cross-entropy.
# Validation always uses plain cross-entropy on hard labels.
if cutmix:
    train_loss_function = CutMixCrossEntropyLoss(size_average=True)  # 'mean'
else:
    train_loss_function = LabelSmoothSoftmaxCE(lb_pos=0.9, lb_neg=0.005)
valid_loss_function = nn.CrossEntropyLoss()

# Stratified split preserves per-class proportions between train and val.
sss = StratifiedShuffleSplit(n_splits=1, train_size=train_size, random_state=SEED)
# ss = ShuffleSplit(n_splits=1, train_size=train_size, random_state=SEED)

# Reuse the CSV already loaded above instead of reading the file a second time.
train_csv = data_info
scaler = torch.cuda.amp.GradScaler()  # for AMP training
# One pass per split (n_splits=1): train on the train indices, validate on the
# held-out indices, checkpoint on best validation accuracy.
for train_index, valid_index in sss.split(train_csv['image'], train_csv['label']):
    train_subsampler = torch.utils.data.SubsetRandomSampler(train_index)
    valid_subsampler = torch.utils.data.SubsetRandomSampler(valid_index)
    print('{} images for training, {} images for validation'.format(len(train_subsampler), len(valid_subsampler)))
    print('--------------------------------------')

    # Data loaders restricted to the split indices; CutMix (if enabled)
    # wraps the dataset so samples and labels are mixed on the fly.
    if cutmix:
        train_dataset = CutMix(train_dataset, num_class=nc, num_mix=1, beta=1.0, prob=cutmix)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              sampler=train_subsampler,
                              num_workers=nw)
    valid_loader = DataLoader(val_dataset,
                              batch_size=batch_size,
                              sampler=valid_subsampler,
                              num_workers=nw)

    # Fresh pretrained backbone with a classifier head sized to nc classes.
    model = timm_model(model_name, pretrained=True, num_classes=nc)
    model.to(device)

    # Optimizer and LR schedule: plain cosine annealing, or warm restarts
    # when CosAnneal is falsy.
    optimizer = AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    if CosAnneal:
        scheduler = CosineAnnealingLR(optimizer, T_max=CosAnneal, last_epoch=-1, eta_min=1e-6)
    else:
        scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=CosT0, T_mult=CosTMult, last_epoch=-1)

    best_acc = 0.
    best_f1 = 0.
    iters = len(train_loader)
    val_loss_list = []
    val_acc_list = []
    lr_list = []
    train_loss_list = []
    for epoch in range(epochs):
        # ---------------- training ----------------
        model.train()
        train_mloss = 0.
        train_metric = MetricMonitor()
        train_stream = tqdm(train_loader)  # cutmix train_loader
        for batch, data in enumerate(train_stream):
            imgs, labels = data
            imgs = imgs.to(device)
            labels = labels.to(device)
            # Forward under AMP autocast.
            with torch.cuda.amp.autocast():
                logits = model(imgs)
                loss = train_loss_function(logits, labels)

            # BUGFIX: backward() must run on EVERY batch when accumulating
            # gradients; the original only backpropagated on step batches,
            # silently dropping the others whenever accumulation > 1.
            # Normalizing by `accumulation` keeps the effective gradient
            # magnitude equal to that of a single large batch.
            scaler.scale(loss / accumulation).backward()
            if (batch + 1) % accumulation == 0:
                scaler.step(optimizer)  # optimizer.step() through the scaler
                scaler.update()
                optimizer.zero_grad()
                scheduler.step(epoch + batch / iters)  # fractional-epoch step
                lr_list.append(scheduler.get_last_lr())  # record lr

            # NOTE: with CutMix the labels are mixed, so F1/accuracy cannot
            # be computed here; only the training loss is tracked.
            train_metric.update('train loss', loss.item())
            train_mloss += loss.item() / iters
            train_stream.set_description(
                'Epoch[{}/{}]   {}'.format(epoch, epochs, train_metric)
            )

        train_loss_list.append(train_mloss)
        print('--------------------------------------')
        print('Starting validation')

        # ---------------- validation (once per epoch) ----------------
        model.eval()
        val_metric = MetricMonitor()
        val_mloss = 0.
        val_stream = tqdm(valid_loader)
        with torch.no_grad():
            for batch, data in enumerate(val_stream):
                imgs, labels = data
                logits = model(imgs.to(device))
                loss = valid_loss_function(logits, labels.to(device))
                # Metrics are computed on CPU by the utils helpers.
                f1_marco = calculate_f1_macro(logits, labels)  # on cpu
                val_acc = accuracy(logits, labels)  # on cpu
                val_metric.update('Loss', loss.item())
                val_metric.update('F1', f1_marco)
                val_metric.update('Acc', val_acc)
                val_mloss += loss.item() / len(valid_loader)
                val_stream.set_description(
                    'Validation: [{}]'.format(val_metric)
                )
            val_loss_list.append(val_mloss)
            # BUGFIX: record the epoch-average accuracy, not just the
            # accuracy of the last validation batch.
            val_acc_list.append(val_metric.metrics['Acc']['avg'])

        # Checkpoint whenever the epoch accuracy matches or beats the best.
        if val_metric.metrics['Acc']['avg'] >= best_acc:
            best_acc = val_metric.metrics['Acc']['avg']
            print('Save weight! Epoch %d: Acc %.3f' % (epoch, best_acc))
            save_path = f'{save_dir}epoch{epoch}-val_acc-{round(best_acc, 4)}.pth'
            torch.save(model.state_dict(), save_path)

    # Always keep the final-epoch weights as well.
    print('saving last weight...')
    save_path = f'{save_dir}last.pth'
    torch.save(model.state_dict(), save_path)
    print('Training Over! Best Accuracy is ', best_acc)
    # Dump training curves (lr / losses / accuracy) for quick inspection.
    if not os.path.exists(f'{save_dir}draw'):
        os.mkdir(f'{save_dir}draw')
    draw.draw_lr(lr_list, f'{save_dir}draw/lr.png')
    draw.draw_loss(train_loss_list, f'{save_dir}draw/t_loss.png')
    draw.draw_loss(val_loss_list, f'{save_dir}draw/v_loss.png')
    draw.draw_acc(val_acc_list, f'{save_dir}draw/v_acc.png')
    print('Draw Over!')
