# Standard library
import json
import os
import warnings

# Third-party
import albumentations as A  # Augmentation
import pandas as pd
import torch  # required: `torch.device`, `torch.save`, AMP, samplers are used below
import torch.nn as nn
import torch_utils as tu  # https://github.com/seefun/TorchUtils
from albumentations import pytorch as AT
from cutmix.cutmix import CutMix
from cutmix.utils import CutMixCrossEntropyLoss
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
from torch.utils.data import DataLoader
from tqdm import tqdm

# Local
from utils import draw
from utils.dataset import TrainDataset
from utils.metrics import *
from utils.model import timm_model

warnings.filterwarnings("ignore")  # silence third-party warning spam

# --- Device setup ---
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE(review): get_device_properties(0) raises on CPU-only machines even
# though `device` falls back to "cpu" above — this script assumes a GPU.
cuda_info = torch.cuda.get_device_properties(0)
print("using {} {} {}MB.".format(device, cuda_info.name, cuda_info.total_memory / 1024 ** 2))

# seed everything (deterministic=False keeps cuDNN benchmarking enabled)
SEED = 233
tu.tools.seed_everything(SEED, deterministic=False)
# Configuration options
k_folds = 10  # number of cross-validation folds

batch_size = 64
accumulation = 1  # gradient-accumulation steps; 1 = optimizer step every batch
epochs = 30
mixup = 0.  # 0 to 1, probability of applying Mixup (0 disables it)
mixup2cutmix = 0.  # probability that a Mixup batch switches to CutMix (tu.Mixup switch_prob)
cutmix = 0.5  # 0 to 1, per-sample CutMix probability
CosAnneal = True  # CosAnneal scheduler (otherwise cosine warm restarts)
lr = 3e-4
weight_decay = 2e-3
input_size = 224  # final crop size fed to the model
dataset_path = "../Dataset"
use_pseudo = True  # also train on pseudo-labelled test images
# timm model_name
# model_name = 'resnest50d'
model_name = 'tf_efficientnetv2_s_in21ft1k'
# model_name = 'tf_efficientnetv2_m_in21ft1k'  # fc --> classifier
# save dir — checkpoints and plots go to e.g. "tf_efficientnetv2_s_in21ft1k-pseude/".
# NOTE(review): '-pseude' looks like a typo for '-pseudo'; kept as-is so
# previously written result directories keep matching.
save_dir = model_name + '-pseude' + os.sep
# exist_ok avoids the check-then-create race of the old exists()+makedirs pair.
os.makedirs(save_dir, exist_ok=True)

# Albumentations pipelines: augmented for training, deterministic for validation.
# Images are first resized to a slightly larger square, then cropped to input_size
# (random crop for train, center crop for val), normalized with the default
# ImageNet statistics, and converted HWC -> CHW tensors.
_resize_edge = int(input_size * (256 / 224))

albu_transform = {
    'train': A.Compose([
        A.Resize(_resize_edge, _resize_edge),
        A.RandomCrop(input_size, input_size),
        # Apply exactly two of the four geometric flip/rotation ops.
        A.SomeOf(
            [A.RandomRotate90(), A.HorizontalFlip(), A.VerticalFlip(), A.Flip()],
            2,
        ),
        A.ShiftScaleRotate(border_mode=1),
        tu.randAugment(),
        A.Normalize(),  # default ImageNet mean/std
        AT.ToTensorV2(p=1.0),  # HWC -> CHW
    ]),
    'val': A.Compose([
        A.Resize(_resize_edge, _resize_edge),
        A.CenterCrop(input_size, input_size),
        A.Normalize(),  # default ImageNet mean/std
        AT.ToTensorV2(p=1.0),  # HWC -> CHW
    ]),
}
# Choose the label CSV: with pseudo-labelled test images, or the clean set only.
train_csv = 'train_test_70.csv' if use_pseudo else 'train_clean.csv'
csv_path = os.path.join(dataset_path, train_csv)
file_path = os.path.join(dataset_path, 'trainval')
data_info = pd.read_csv(csv_path)

# Sorted label vocabulary -> contiguous class indices, plus the inverse map.
labels = sorted(set(data_info['label']))
nc = len(labels)  # num_classes
class_to_num = {name: idx for idx, name in enumerate(labels)}
num_to_class = {idx: name for name, idx in class_to_num.items()}

# Persist the index->class mapping for inference. JSON serialization turns the
# int keys into strings, e.g. {'0': ..., '1': ...}.
with open('class_indices.json', 'w') as json_file:
    json.dump(num_to_class, json_file, indent=4)

# Both dataset objects index the full CSV; only the transform differs.
# Per-fold train/val membership is applied later via SubsetRandomSamplers.
train_dataset = TrainDataset(csv_path, file_path, class_to_num, albu_transform['train'])
val_dataset = TrainDataset(csv_path, file_path, class_to_num, albu_transform['val'])
print("total {} images in trainval.".format(len(train_dataset)))  # 51136+6408*0.7

# Worker count: min(CPU cores, batch size, 8); 0 workers when batch_size == 1.
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
# nw = 0
print('Using {} dataloader workers every process'.format(nw))
print('Using model {}, data-path={}, save-dir={}'.format(model_name, dataset_path, save_dir))
print('seed={}, kfold={}, nc={}, epoch={}, batch-size={}, accumulate={}'.format(SEED, k_folds, nc, epochs, batch_size, accumulation))
print('init-lr={}, input-size={}, cutmix={}, mixup={}, mixup2cutmix={}'.format(lr, input_size, cutmix, mixup, mixup2cutmix))
# Loss function
# Selection priority: CutMix > Mixup > plain label-smoothing CE. With both
# `cutmix` and `mixup` non-zero, only the CutMix branch is taken.
drop_last = False
if cutmix:
    # The CutMix wrapper yields soft (mixed) targets, so use its dedicated loss.
    train_loss_function = CutMixCrossEntropyLoss(size_average=True)  # 'mean'
elif mixup:
    train_loss_function = tu.SoftTargetCrossEntropy()
    mixup_function = tu.Mixup(prob=mixup, switch_prob=mixup2cutmix, label_smoothing=0.05, num_classes=nc)
    # Mixup pairs samples within the batch; drop the ragged final batch.
    drop_last = True
else:
    train_loss_function = tu.LabelSmoothingCrossEntropy(smoothing=0.1)
# Validation always scores against hard labels with plain cross-entropy.
valid_loss_function = nn.CrossEntropyLoss()

# Define the K-fold Cross Validator
# kfold = KFold(n_splits=k_folds, shuffle=True, random_state=SEED)
# Stratified split keeps the per-fold class distribution balanced.
skf = StratifiedKFold(n_splits=k_folds, shuffle=True, random_state=SEED)

# For fold results: fold index -> best validation accuracy.
results = {}
# The CSV was already loaded into `data_info` above; reuse it instead of
# reading the same file from disk a second time.
train_df = data_info

# AMP Training
scaler = torch.cuda.amp.GradScaler()
# K-fold Cross Validation model evaluation
# ---- K-fold cross-validation: train one fresh model per fold ----
for fold, (train_ids, valid_ids) in enumerate(skf.split(train_df['image'], train_df['label'])):

    print('--------------------------------------')
    print(f'FOLD {fold + 1}')

    # Sample elements randomly from a given list of ids, no replacement.
    # The samplers restrict each DataLoader to this fold's rows of the shared datasets.
    train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
    valid_subsampler = torch.utils.data.SubsetRandomSampler(valid_ids)
    print('{} images for training, {} images for validation'.format(len(train_subsampler), len(valid_subsampler)))
    print('--------------------------------------')

    # CutmixDataset
    # CutMix wraps the dataset so samples are mixed in pairs (num_mix=2) and
    # soft (mixed) labels are returned; `prob` is the per-sample probability.
    if cutmix:
        train_dataset_mix = CutMix(train_dataset, num_class=nc, num_mix=2, beta=1.0, prob=cutmix)
    else:
        train_dataset_mix = train_dataset

    train_loader = DataLoader(train_dataset_mix,
                              batch_size=batch_size,
                              sampler=train_subsampler,
                              num_workers=nw,
                              drop_last=drop_last)
    valid_loader = DataLoader(val_dataset,
                              batch_size=batch_size,
                              sampler=valid_subsampler,
                              num_workers=nw)

    # Initialize a model and put it on the device specified.
    # A fresh pretrained model per fold so folds don't leak into each other.
    model = timm_model(model_name, pretrained=True, num_classes=nc).to(device)

    # Initialize optimizer
    optimizer = AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    # One cosine cycle spanning all optimizer updates of this fold
    # (scheduler.step() is called once per accumulated optimizer step below).
    T = len(train_loader) // accumulation * epochs  # cycle
    if CosAnneal:
        scheduler = CosineAnnealingLR(optimizer, T_max=T, eta_min=1e-6)
    else:
        scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=5, T_mult=2)

    best_acc = 0.0
    best_f1 = 0.0
    val_loss_list = []
    val_acc_list = []
    lr_list = []
    train_loss_list = []
    for epoch in range(epochs):
        model.train()
        train_mloss = 0.
        train_metric = MetricMonitor()
        # Iterate the training set by batches
        train_stream = tqdm(train_loader)  # cutmix train_loader
        for batch, (imgs, labels) in enumerate(train_stream):
            imgs, labels = imgs.to(device), labels.to(device)
            if mixup:
                imgs, labels = mixup_function(imgs, labels)
            # Forward under autocast (mixed precision); the loss is scaled
            # before backward to avoid fp16 gradient underflow.
            with torch.cuda.amp.autocast():
                logits = model(imgs)
                loss = train_loss_function(logits, labels)
            scaler.scale(loss).backward()
            # Step only every `accumulation` batches (gradient accumulation).
            if (batch + 1) % accumulation == 0:
                scaler.step(optimizer)  # optimizer.step()
                scaler.update()
                optimizer.zero_grad()
                scheduler.step()
            # NOTE(review): get_last_lr() returns a list (one lr per param
            # group), so lr_list is a list of lists — presumably draw.draw_lr
            # handles that; verify.
            lr_list.append(scheduler.get_last_lr())  # record lr per batch
            # Metric
            if not cutmix:
                f1_marco = calculate_f1_macro(logits, labels)  # on cpu
                train_acc = accuracy(logits, labels)  # on cpu
                train_metric.update('F1', f1_marco)  # with CutMix the labels are mixed too, so F1/Acc can't be computed
                train_metric.update('Acc', train_acc)
            train_metric.update('train loss', loss.item())
            train_mloss += loss.item() / len(train_loader)
            train_stream.set_description(
                'K_Fold[{}/{}] Epoch[{}/{}] {}'.format(fold + 1, k_folds, epoch + 1, epochs, train_metric)
            )
        # update lr_schduler per epoch (disabled: stepping is per optimizer update above)
        # scheduler.step()
        train_loss_list.append(train_mloss)

        # Valid per epoch
        print('Starting validation')
        model.eval()
        val_metric = MetricMonitor()
        val_mloss = 0.
        val_stream = tqdm(valid_loader)
        with torch.no_grad():
            for batch, (imgs, labels) in enumerate(val_stream):
                logits = model(imgs.to(device))
                loss = valid_loss_function(logits, labels.to(device))
                # Metric
                # NOTE(review): unlike the training loop, `labels` here is the
                # CPU tensor while `logits` lives on `device` — assumes the
                # metric helpers move tensors as needed; confirm.
                f1_marco = calculate_f1_macro(logits, labels)  # on cpu
                val_acc = accuracy(logits, labels)  # on cpu
                val_metric.update('Loss', loss.item())
                val_metric.update('F1', f1_marco)
                val_metric.update('Acc', val_acc)
                # Record loss and accuracy
                val_mloss += loss.item() / len(valid_loader)
                val_stream.set_description(
                    'Validation: [{}]'.format(val_metric)
                )
            val_loss_list.append(val_mloss)

        # Track the best epoch-average accuracy; only checkpoint late epochs
        # (epoch index > 20) to limit disk usage.
        if val_metric.metrics['Acc']['avg'] >= best_acc:
            best_acc = val_metric.metrics['Acc']['avg']
            if epoch > 20:
                print('Save weight! fold %d epoch %d: acc %.3f' % (fold+1, epoch+1, best_acc))
                save_path = f'{save_dir}model-fold-{fold}-epoch{epoch}-val_acc-{round(best_acc, 4)}.pth'
                torch.save(model.state_dict(), save_path)

        # if val_metric.metrics['F1']['avg'] >= best_f1:
        #     best_f1 = val_metric.metrics['F1']['avg']
        #     if epoch > 10:
        #         print('Save weight! fold %d epoch %d: f1_macro %.3f' % (fold, epoch, best_f1))
        #         save_path = f'{save_dir}model-fold-{fold}-epoch{epoch}-val_f1-{round(best_f1, 4)}.pth'
        #         torch.save(model.state_dict(), save_path)

        # NOTE(review): this appends the best-so-far accuracy, not this epoch's
        # accuracy, so the "v_acc" plot is monotonic — confirm that is intended.
        val_acc_list.append(best_acc)
        print('--------------------------------------')

    # Saving the last weight
    save_path = f'{save_dir}model-fold-{fold}-last.pth'
    torch.save(model.state_dict(), save_path)
    # Record k_fold best_acc or best_f1
    results[fold] = best_acc
    # results[fold] = best_f1
    if not os.path.exists(f'{save_dir}draw'):
        os.mkdir(f'{save_dir}draw')
    draw.draw_lr(lr_list, f'{save_dir}draw/lr-fold-{fold}.png')
    draw.draw_loss(train_loss_list, f'{save_dir}draw/t_loss-fold-{fold}.png')
    draw.draw_loss(val_loss_list, f'{save_dir}draw/v_loss-fold-{fold}.png')
    draw.draw_acc(val_acc_list, f'{save_dir}draw/v_acc-fold-{fold}.png')
    print('Save Draw!')

# Summarize cross-validation: per-fold best accuracy plus the overall mean.
print(f'K-FOLD CROSS VALIDATION RESULTS FOR {k_folds} FOLDS')
print('--------------------------------------')
for fold_idx, fold_best_acc in results.items():
    print(f'Fold {fold_idx}: {fold_best_acc} ')
print(f'Average: {sum(results.values()) / len(results)} ')
