import json
import os
import random
import shutil
import math
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import torch.backends.cudnn
import torchvision.transforms as transforms
from timm.utils import accuracy, AverageMeter
from sklearn.metrics import classification_report
import h5py
from models.bifovnet.bifovnet import BiFovNet



torch.backends.cudnn.benchmark = False
import warnings
warnings.filterwarnings("ignore")
from utils.ema import EMA

# TODO: solve no qt platform plugin could be initialized
matplotlib.use('Agg')  # headless backend so figures can be saved without a display

# Fix all RNG seeds for reproducible runs.
seed = 240611
random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True

# utils
origin_datadir = r'/home/yeadc/Documents/cxj/datasets_classify_v2'  # original dataset location
categories = ['ADI_particle_developed', 'Array_peeling', 'Cu_missing', 'Other_peeling', 'Partial_etch', 'Pattern_fail',
              'PR_peeling', 'Seam', 'Reference', 'Surface_particle', 'Burried_particle', 'Cu_diffuse', 'Prelayer_defect_developed',
              'Void', 'Residue', 'Scratch']

# Bidirectional mappings between category names and integer class ids.
categories_to_id = dict((c, i) for i, c in enumerate(categories))
id_to_categories = dict((v, k) for k, v in categories_to_id.items())

torch.cuda.empty_cache()

def get_imgs_labels(dir):
    """Collect sample file paths and integer labels from a dataset directory.

    Args:
        dir: Root directory laid out as ``<dir>/<category>/<sample files>``,
            e.g. ``.../datasets/train``. Each category folder name must be a
            key of the module-level ``categories_to_id`` mapping.

    Returns:
        Tuple ``(imgs, labels)``: parallel lists of sample file paths and
        their integer class ids.
    """
    imgs = []
    labels = []
    for category in os.listdir(dir):
        category_path = os.path.join(dir, category)
        # BUGFIX: skip stray non-directory entries (e.g. a README file);
        # os.listdir() on a file path would raise NotADirectoryError.
        if not os.path.isdir(category_path):
            continue
        for img in os.listdir(category_path):
            imgs.append(os.path.join(category_path, img))
            labels.append(categories_to_id[category])
    return imgs, labels

class Mydataset(torch.utils.data.Dataset):
    """Dataset yielding a (class, defect, reference) image triplet per sample.

    Each entry of ``img_paths`` is an HDF5 file holding three grayscale
    arrays under the keys ``class_tiff``, ``defect_tiff`` and
    ``reference_tiff``.
    """

    def __init__(self, img_paths, labels, transform_class, transform_defect):
        # img_paths: list of .h5 file paths; labels: parallel list of class ids.
        self.img_paths = img_paths
        self.labels = labels
        # transform_class is applied to the class image; transform_defect is
        # applied to both the defect and the reference images.
        self.transform_class = transform_class
        self.transform_defect = transform_defect

    def __getitem__(self, index):
        """Load the triplet at ``index``; return a dict of images plus label."""
        img_path = self.img_paths[index]
        label = self.labels[index]
        # BUGFIX: use a context manager so the HDF5 handle is closed even if a
        # read raises (the original f.close() leaked the handle on exceptions).
        with h5py.File(img_path, 'r') as f:
            class_tiff = f['class_tiff'][:]
            defect_tiff = f['defect_tiff'][:]
            reference_tiff = f['reference_tiff'][:]

        # Convert to PIL so torchvision transforms can consume them.
        class_tiff = Image.fromarray(class_tiff)
        defect_tiff = Image.fromarray(defect_tiff)
        reference_tiff = Image.fromarray(reference_tiff)

        if self.transform_class is not None:
            class_tiff = self.transform_class(class_tiff)
        if self.transform_defect is not None:
            defect_tiff = self.transform_defect(defect_tiff)
            reference_tiff = self.transform_defect(reference_tiff)
        sample = {'class_tiff': class_tiff, 'defect_tiff': defect_tiff,
                  'reference_tiff': reference_tiff, 'label': label}
        return sample

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.img_paths)

# 定义训练过程
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch with gradient accumulation.

    Relies on the module-level ``criterion_train`` loss function. Returns
    ``(average_loss, average_top1_accuracy)`` over the epoch.

    NOTE(review): ``use_amp``/GradScaler is set up in ``__main__`` but never
    used here, so mixed precision is effectively disabled — confirm intent.
    """
    model.train()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()
    total_num = len(train_loader.dataset)
    print(total_num, len(train_loader))

    # Number of mini-batches to accumulate before each optimizer step.
    accum_iter = 2

    for batch_idx, samples in enumerate(train_loader):
        class_images = samples['class_tiff'].to(device, non_blocking=True)
        defect_images = samples['defect_tiff'].to(device, non_blocking=True)
        target = samples['label'].to(device, non_blocking=True)
        # NOTE: 'reference_tiff' is loaded by the dataset but the model only
        # consumes the class and defect branches, so it is not moved to GPU.

        output = model(class_images, defect_images)
        loss = criterion_train(output, target)
        loss = loss / accum_iter  # scale loss to the mean of the accumulated batch size

        # backward pass
        loss.backward()

        # Weight update every accum_iter batches, or on the final batch.
        if ((batch_idx + 1) % accum_iter == 0) or (batch_idx + 1 == len(train_loader)):
            optimizer.step()
            optimizer.zero_grad()

        lr = optimizer.state_dict()['param_groups'][0]['lr']
        # BUGFIX: loss_meter.update was previously called twice per batch,
        # double-counting every sample in the running average.
        loss_meter.update(loss.item(), target.size(0))
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        acc1_meter.update(acc1.item(), target.size(0))
        acc5_meter.update(acc5.item(), target.size(0))
        if (batch_idx + 1) % 10 == 0:
            # BUGFIX: progress previously used len(samples), which is the
            # number of dict keys (4), not the batch size.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR:{:.9f}'.format(
                epoch, (batch_idx + 1) * target.size(0), len(train_loader.dataset),
                       100. * (batch_idx + 1) / len(train_loader), loss.item(), lr))
    ave_loss = loss_meter.avg
    acc = acc1_meter.avg
    print('epoch:{}\tloss:{:.2f}\tacc:{:.2f}'.format(epoch, ave_loss, acc))
    return ave_loss, acc


# 验证过程
@torch.no_grad()
def val(model, device, test_loader, epoch, best_model):
    """Evaluate on ``test_loader`` and checkpoint the best-performing model.

    Updates the module-level ``Best_ACC`` in place. Returns
    ``(val_list, pred_list, avg_loss, top1_acc, best_model_path)`` where
    ``val_list``/``pred_list`` are flat lists of ground-truth / predicted ids.

    Relies on module globals: ``criterion_val``, ``file_dir``, ``use_ema``,
    ``ema``, ``ema_epoch``, ``Best_ACC``.
    """
    global Best_ACC
    model.eval()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()
    total_num = len(test_loader.dataset)
    print(total_num, len(test_loader))
    val_list = []   # ground-truth labels accumulated over all batches
    pred_list = []  # argmax predictions, parallel to val_list
    if use_ema and epoch % ema_epoch == 0:
        ema.apply_shadow()
    for sample in test_loader:
        # Record ground truth from the CPU tensor before the device move.
        val_list.extend(t.item() for t in sample['label'])
        class_image = sample['class_tiff'].to(device, non_blocking=True)
        defect_image = sample['defect_tiff'].to(device, non_blocking=True)
        target = sample['label'].to(device, non_blocking=True)
        # NOTE: 'reference_tiff' is unused by the current two-branch model.
        output = model(class_image, defect_image)
        # BUGFIX: removed the no-op `output.to(device)` whose result was
        # discarded (model output is already on `device`).
        loss = criterion_val(output, target)
        _, pred = torch.max(output.data, 1)
        pred_list.extend(p.item() for p in pred)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        acc5_meter.update(acc5.item(), target.size(0))
    if use_ema and epoch % ema_epoch == 0:
        ema.restore()
    acc = acc1_meter.avg
    print('\nVal set: Average loss: {:.4f}\tAcc1:{:.3f}%\tAcc5:{:.3f}%\n'.format(
        loss_meter.avg,  acc,  acc5_meter.avg))
    # Unwrap DataParallel once so every checkpoint loads without the wrapper
    # (previously the periodic save kept the wrapper while the best save
    # stripped it, and the two branches duplicated identical save logic).
    ckpt = model.module if isinstance(model, torch.nn.DataParallel) else model
    epoch_path = file_dir + "/" + 'model_' + str(epoch) + '_' + str(round(acc, 3)) + '.pth'
    if epoch % 20 == 0:
        torch.save(ckpt, epoch_path)
    if acc > Best_ACC:
        torch.save(ckpt, epoch_path)
        torch.save(ckpt, file_dir + '/' + 'best.pth')
        if best_model != '':
            os.remove(best_model)
        best_model = epoch_path
        Best_ACC = acc
    return val_list, pred_list, loss_meter.avg, acc, best_model


if __name__ == '__main__':
    # Create the directory that checkpoints and plots are saved to.
    file_dir = 'checkpoints/bifnet_s_patch4_240612_starv2_224_48_[3,3,12,5]_two_branches_without_attn_cbam'
    if os.path.exists(file_dir):
        shutil.rmtree(file_dir)  # delete the previous run, then recreate
        os.makedirs(file_dir)
    else:
        os.makedirs(file_dir)
    # Global hyperparameters (read by train()/val() as module globals).
    model_lr = 1e-3
    BATCH_SIZE = 64
    # TODO epochs:600->150
    EPOCHS = 50 # 600
    DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # DEVICE = torch.device('cpu')
    use_amp = True  # whether to use mixed precision
    use_dp=False # whether to enable DataParallel multi-GPU training
    classes = 16
    resume = False #todo
    CLIP_GRAD = 5.0  # NOTE(review): gradient-clipping threshold, never applied anywhere — confirm
    model_path = "" #'best.pth'
    Best_ACC = 0 # tracks the best validation top-1 accuracy so far
    use_ema=False
    ema_epoch=32
    best_model = ''

    # Data preprocessing: augmented transforms for training, deterministic
    # resize-only transforms for validation. Single-channel normalization.
    transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.40272543], std=[0.13901867])
    ])
    transform_test = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.40272543], std=[0.13901867])
    ])# ([0.42368844, 0.42368844, 0.42368844], [0.14975642, 0.14975642, 0.14975642])

    # Transforms for the defect/reference branch (separate channel statistics).
    resize_defect_ref_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.402817], std=[0.14428316])  # TODO
    ])

    resize_defect_ref_test = transforms.Compose([
        transforms.Resize((224, 224)),  # todo
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.402817], std=[0.14428316])  # TODO
    ])


    #TODO
    train_imgs, train_labels = get_imgs_labels(r'/home/yeadc/Documents/cxj/datasets_h5_v2/train')
    val_imgs, val_labels = get_imgs_labels(r'/home/yeadc/Documents/cxj/datasets_h5_v2/val')

    dataset_train = Mydataset(train_imgs, train_labels, transform, resize_defect_ref_train)
    dataset_test = Mydataset(val_imgs, val_labels, transform_test, resize_defect_ref_test)

    # Data loaders.
    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, num_workers=8, shuffle=True, drop_last=True)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, num_workers=8, shuffle=False)


    # Instantiate the losses and the model, and move the model to the GPU.
    criterion_train = torch.nn.CrossEntropyLoss()
    criterion_val = torch.nn.CrossEntropyLoss()
    # Build the model.
    model = BiFovNet(dim=48, depth=[3, 3, 12, 5], in_chans=1, kernel_size=7, patch_size=4,
                   num_classes=16, H=224, W=224, p_h=[8, 4, 2, 1], p_w=[8, 4, 2, 1])
    model.to(device=DEVICE)

    #todo
    if resume:
        model = torch.load(model_path)
        start_epoch = 1
    else:
        start_epoch = 1


    weight_decay = 5e-4
    warmup_epoch = 10

    optimizer = torch.optim.AdamW(model.parameters(), lr=model_lr,
                                  betas=(0.9, 0.95), weight_decay=weight_decay)
    # optimizer = torch.optim.SGD(model.parameters(), lr=model_lr, momentum=0.937, weight_decay=weight_decay)

    # Linear warmup for the first warmup_epoch epochs combined (via min) with
    # a cosine decay over EPOCHS.
    lr_func = lambda epoch: min((epoch + 1) / (warmup_epoch + 1e-8), 0.5 * (math.cos(epoch / EPOCHS * math.pi) + 1))
    cosine_schedule = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_func, verbose=True)

    # NOTE(review): the scaler is created but never used in train(), so AMP is
    # effectively disabled — confirm whether scaler.scale/step should be wired in.
    if use_amp:
        scaler = torch.cuda.amp.GradScaler()

    if torch.cuda.device_count() > 1 and use_dp:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = torch.nn.DataParallel(model)
    if use_ema:
        ema = EMA(model, 0.999)
        ema.register()

    # Training and validation loop.
    is_set_lr = False
    log_dir = {}
    train_loss_list, val_loss_list, train_acc_list, val_acc_list, epoch_list = [], [], [], [], []
    for epoch in range(start_epoch, EPOCHS + 1):
        epoch_list.append(epoch)
        train_loss, train_acc = train(model, DEVICE, train_loader, optimizer, epoch)
        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        log_dir['train_acc'] = train_acc_list
        log_dir['train_loss'] = train_loss_list
        val_list, pred_list, val_loss, val_acc, best_model = val(model, DEVICE, test_loader, epoch, best_model)
        val_loss_list.append(val_loss)
        val_acc_list.append(val_acc)
        log_dir['val_acc'] = val_acc_list
        log_dir['val_loss'] = val_loss_list
        log_dir['best_acc'] = Best_ACC
        # Persist the metric history every epoch so crashes lose no logs.
        with open(file_dir + '/result.json', 'w', encoding='utf-8') as file:
            file.write(json.dumps(log_dir))
        print(classification_report(val_list, pred_list, target_names=categories))
        # NOTE(review): with EPOCHS=50 this condition is always true, so the
        # fixed-LR fallback branch below is dead code.
        if epoch < 600:
            cosine_schedule.step()
        else:
            if not is_set_lr:
                for param_group in optimizer.param_groups:
                    param_group["lr"] = 1e-6
                    is_set_lr = True
        # Plot and save the loss curves for this run.
        fig = plt.figure(1)
        plt.plot(epoch_list, train_loss_list, 'r-', label=u'Train Loss')
        # Show the legend.
        plt.plot(epoch_list, val_loss_list, 'b-', label=u'Val Loss')
        plt.legend(["Train Loss", "Val Loss"], loc="upper right")
        plt.xlabel(u'epoch')
        plt.ylabel(u'loss')
        plt.title('Model Loss ')
        plt.savefig(file_dir + "/loss.png")
        plt.close(1)
        # Plot and save the accuracy curves for this run.
        fig2 = plt.figure(2)
        plt.plot(epoch_list, train_acc_list, 'r-', label=u'Train Acc')
        plt.plot(epoch_list, val_acc_list, 'b-', label=u'Val Acc')
        plt.legend(["Train Acc", "Val Acc"], loc="lower right")
        plt.title("Model Acc")
        plt.ylabel("acc")
        plt.xlabel("epoch")
        plt.savefig(file_dir + "/acc.png")
        plt.close(2)





