import json
import os
import math
import random
import shutil
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
import torch.backends.cudnn
import torchvision.transforms as transforms
from timm.utils import accuracy, AverageMeter
from sklearn.metrics import classification_report
from timm.loss import SoftTargetCrossEntropy

from timm.models.swin_transformer_v2 import SwinTransformerV2
import h5py
from timm.models.convnext import ConvNeXt
from timm.models.swin_transformer import SwinTransformer

# TODO: solve no qt platform plugin could be initialized
matplotlib.use('Agg')

torch.backends.cudnn.benchmark = False
import warnings
warnings.filterwarnings("ignore")
from utils.ema import EMA



# Reproducibility: pin RNG seeds and force deterministic cuDNN kernels.
seed = 2314
random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True

# utils
origin_datadir = r'/home/yeadc/Documents/cxj/datasets_classify_v2'  # location of the original (pre-h5) dataset

# Defect-class names; list position defines each class's integer label.
categories = ['ADI_particle_developed', 'Array_peeling', 'Cu_missing', 'Other_peeling', 'Partial_etch', 'Pattern_fail',
              'PR_peeling', 'Seam', 'Reference', 'Surface_particle', 'Burried_particle', 'Cu_diffuse', 'Prelayer_defect_developed',
              'Void', 'Residue', 'Scratch']

# Forward and inverse label maps, e.g. 'ADI_particle_developed' -> 0 and 0 -> 'ADI_particle_developed'.
categories_to_id = {name: idx for idx, name in enumerate(categories)}
id_to_categories = {idx: name for idx, name in enumerate(categories)}

def get_imgs_labels(dir):
    """Collect image file paths and their integer labels from a split directory.

    Expects ``dir`` to contain one sub-directory per class, named after an
    entry of the module-level ``categories`` list
    (e.g. ``.../train/Seam/*.h5``).

    Args:
        dir: path of the dataset split root (e.g. the ``train`` or ``val`` dir).

    Returns:
        (imgs, labels): parallel lists — ``imgs[i]`` is a file path and
        ``labels[i]`` its class id from ``categories_to_id``.

    Raises:
        KeyError: if a sub-directory name is not a known category.
    """
    imgs = []
    labels = []
    # sorted() makes the sample order filesystem-independent, so runs are
    # reproducible given the fixed seeds set at module import.
    for category in sorted(os.listdir(dir)):
        category_path = os.path.join(dir, category)
        if not os.path.isdir(category_path):
            continue  # skip stray files sitting next to the class folders
        label = categories_to_id[category]  # look up once per class, not per image
        for img_name in sorted(os.listdir(category_path)):
            imgs.append(os.path.join(category_path, img_name))
            labels.append(label)
    return imgs, labels

class Mydataset(torch.utils.data.Dataset):
    """Dataset of images stored one-per-file in HDF5, under the 'class_tiff' key.

    ``img_paths`` and ``labels`` are parallel lists: ``img_paths[i]`` is an
    ``.h5`` file path and ``labels[i]`` its integer class id.
    """

    def __init__(self, img_paths, labels, transform):
        self.img_paths = img_paths
        self.labels = labels
        self.transform = transform  # torchvision transform or None

    def __getitem__(self, index):
        """Return ``(image, label)`` for the sample at ``index``.

        The image is read from the h5 file's 'class_tiff' dataset, converted
        to a PIL image, and passed through ``self.transform`` if one is set.
        """
        img_path = self.img_paths[index]
        label = self.labels[index]
        # Context manager guarantees the h5 handle is closed even if reading
        # or a later transform raises (the original leaked it on error).
        with h5py.File(img_path, 'r') as f:
            class_tiff = f['class_tiff'][:]

        class_tiff = Image.fromarray(class_tiff)
        if self.transform is not None:
            class_tiff = self.transform(class_tiff)
        return class_tiff, label

    def __len__(self):
        """Number of samples."""
        return len(self.img_paths)

# 定义训练过程
# Training loop for one epoch
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch and return ``(avg_loss, avg_top1_acc)``.

    Reads the module-level ``criterion_train`` loss. Accuracy values come
    from timm's ``accuracy`` (percentages), averaged over samples via
    ``AverageMeter``.

    Args:
        model: network already moved to ``device``.
        device: torch device for inputs/targets.
        train_loader: DataLoader yielding ``(data, target)`` batches.
        optimizer: optimizer whose first param group's lr is logged.
        epoch: current epoch number (for logging only).
    """
    model.train()

    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()
    total_num = len(train_loader.dataset)
    print(total_num, len(train_loader))
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        output = model(data)
        loss = criterion_train(output, target)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # read the lr directly; rebuilding state_dict() every batch is wasteful
        lr = optimizer.param_groups[0]['lr']
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        # fix: loss_meter.update was previously called twice per batch
        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        acc5_meter.update(acc5.item(), target.size(0))
        if (batch_idx + 1) % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR:{:.9f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                       100. * (batch_idx + 1) / len(train_loader), loss.item(), lr))
    ave_loss = loss_meter.avg
    acc = acc1_meter.avg
    print('epoch:{}\tloss:{:.2f}\tacc:{:.2f}'.format(epoch, ave_loss, acc))
    return ave_loss, acc


# 验证过程
@torch.no_grad()
def val(model, device, test_loader, epoch, best_model):
    """Evaluate on the validation set and manage checkpoints.

    Reads the module-level ``criterion_val``, ``file_dir`` and ``Best_ACC``
    globals; updates ``Best_ACC`` when a new best top-1 accuracy is reached.

    Checkpointing:
      * every 15th epoch, snapshot the model as-is;
      * on a new best accuracy, save the unwrapped model (DataParallel
        stripped) both under the epoch-named path and as ``best.pth``,
        deleting the previous best epoch checkpoint.

    Returns:
        (val_list, pred_list, avg_loss, top1_acc, best_model_path)
    """
    global Best_ACC
    model.eval()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()
    total_num = len(test_loader.dataset)
    print(total_num, len(test_loader))
    val_list = []   # ground-truth labels, CPU ints
    pred_list = []  # argmax predictions, CPU ints
    for data, target in test_loader:
        val_list.extend(t.item() for t in target)
        data = data.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        output = model(data)
        loss = criterion_val(output, target)
        _, pred = torch.max(output.data, 1)
        pred_list.extend(p.item() for p in pred)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        acc5_meter.update(acc5.item(), target.size(0))
    acc = acc1_meter.avg
    print('\nVal set: Average loss: {:.4f}\tAcc1:{:.3f}%\tAcc5:{:.3f}%\n'.format(
        loss_meter.avg,  acc,  acc5_meter.avg))

    ckpt_path = file_dir + "/" + 'model_' + str(epoch) + '_' + str(round(acc, 3)) + '.pth'
    # periodic snapshot (possibly still DataParallel-wrapped, as before)
    if epoch % 15 == 0:
        torch.save(model, ckpt_path)
    if acc > Best_ACC:
        # unwrap DataParallel so the checkpoint loads without the wrapper;
        # this collapses the two previously duplicated branches
        to_save = model.module if isinstance(model, torch.nn.DataParallel) else model
        torch.save(to_save, ckpt_path)
        torch.save(to_save, file_dir + '/' + 'best.pth')
        if best_model != '':
            os.remove(best_model)  # keep only the newest best epoch checkpoint
        best_model = ckpt_path
        Best_ACC = acc
    return val_list, pred_list, loss_meter.avg, acc, best_model


if __name__ == '__main__':
    # Output directory for checkpoints/plots; refuse to overwrite an old run.
    file_dir = 'checkpoints/swin_s_224'
    if os.path.exists(file_dir):
        raise Exception("file dir exists")
    else:
        os.makedirs(file_dir)
    # Global hyper-parameters
    model_lr = 1e-3
    BATCH_SIZE = 96
    EPOCHS = 60
    DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    classes = 16 # TODO: keep in sync with len(categories)
    Best_ACC = 0 # best validation top-1 accuracy so far (updated inside val())
    best_model = ''  # path of current best checkpoint; '' means none yet

    # Data preprocessing. Single-channel mean/std (model uses in_chans=1);
    # train adds a small random rotation, val/test only resize + normalize.
    transform = transforms.Compose([
        transforms.RandomRotation(10),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.40272543], std=[0.13901867])
    ])
    transform_test = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.40272543], std=[0.13901867])
    ])  # ([0.42368844, 0.42368844, 0.42368844], [0.14975642, 0.14975642, 0.14975642])

    # Build (path, label) lists from the class-per-folder h5 dataset layout.
    train_imgs, train_labels = get_imgs_labels(r'/home/yeadc/Documents/cxj/datasets_h5_v2/train')
    val_imgs, val_labels = get_imgs_labels(r'/home/yeadc/Documents/cxj/datasets_h5_v2/val')

    dataset_train = Mydataset(train_imgs, train_labels, transform)
    dataset_test = Mydataset(val_imgs, val_labels, transform_test)

    # Data loaders
    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, num_workers=8, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, num_workers=8, shuffle=False)


    # Losses, read as globals by train()/val().
    # NOTE(review): SoftTargetCrossEntropy is imported but unused here.
    criterion_train = torch.nn.CrossEntropyLoss()
    criterion_val = torch.nn.CrossEntropyLoss()
    # Model selection (alternative configurations kept below for reference)

    # model = ConvNeXt(in_chans=1, num_classes=16, depths=(3, 3, 9, 3), dims=(96, 192, 384, 768))  # Convnext-T
    # model = ConvNeXt(in_chans=1, num_classes=16, depths=(3, 3, 27, 3), dims=(96, 192, 384, 768)) # Convnext-S
    # model = ConvNeXt(in_chans=1, num_classes=16, depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024))  # Convnext-B

    # model = swintransformer(in_chans=1,
    #                         num_classes=16,
    #                         patch_size=4,
    #                         window_size=7,
    #                         embed_dim=96,
    #                         depths=(2, 2, 6, 2),
    #                         num_heads=(3, 6, 12, 24)) #Swin-T

    model = SwinTransformer(in_chans=1,
                            num_classes=16,
                            patch_size=4,
                            window_size=7,
                            embed_dim=96,
                            depths=(2, 2, 18, 2),
                            num_heads=(3, 6, 12, 24)) #Swin-S

    # model = swintransformer(in_chans=1,
    #                         num_classes=16,
    #                         patch_size=4,
    #                         window_size=7,
    #                         embed_dim=128,
    #                         depths=(2, 2, 18, 2),
    #                         num_heads=(4, 8, 16, 32)) #Swin-B


    model.to(DEVICE)

    weight_decay = 1e-4
    warmup_epoch = 5

    optimizer = torch.optim.AdamW(model.parameters(), lr=model_lr,
                                  betas=(0.9, 0.95), weight_decay=weight_decay)
    # Schedule multiplier: linear warmup for the first `warmup_epoch` epochs
    # (the min picks the warmup ramp), then cosine decay over EPOCHS.
    lr_func = lambda epoch: min((epoch + 1) / (warmup_epoch + 1e-8), 0.5 * (math.cos(epoch / EPOCHS * math.pi) + 1))
    cosine_schedule = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_func, verbose=True)


    # Train / validate loop; writes metrics JSON and loss/acc plots per epoch.
    is_set_lr = False
    log_dir = {}
    train_loss_list, val_loss_list, train_acc_list, val_acc_list, epoch_list = [], [], [], [], []
    for epoch in range(1, EPOCHS + 1):
        epoch_list.append(epoch)
        train_loss, train_acc = train(model, DEVICE, train_loader, optimizer, epoch)
        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        log_dir['train_acc'] = train_acc_list
        log_dir['train_loss'] = train_loss_list
        val_list, pred_list, val_loss, val_acc, best_model = val(model, DEVICE, test_loader, epoch, best_model)
        val_loss_list.append(val_loss)
        val_acc_list.append(val_acc)
        log_dir['val_acc'] = val_acc_list
        log_dir['val_loss'] = val_loss_list
        log_dir['best_acc'] = Best_ACC
        # Overwrite the running metrics log each epoch so it survives a crash.
        with open(file_dir + '/result.json', 'w', encoding='utf-8') as file:
            file.write(json.dumps(log_dir))
        print(classification_report(val_list, pred_list, target_names=categories))
        # NOTE(review): EPOCHS is 60, so `epoch < 600` is always true and the
        # fixed-lr fallback branch below is dead code (leftover threshold?).
        if epoch < 600:
            cosine_schedule.step()
        else:
            if not is_set_lr:
                for param_group in optimizer.param_groups:
                    param_group["lr"] = 1e-6
                    is_set_lr = True
        # Loss curves (train vs val), saved to loss.png each epoch.
        fig = plt.figure(1)
        plt.plot(epoch_list, train_loss_list, 'r-', label=u'Train Loss')
        # Show legend
        plt.plot(epoch_list, val_loss_list, 'b-', label=u'Val Loss')
        plt.legend(["Train Loss", "Val Loss"], loc="upper right")
        plt.xlabel(u'epoch')
        plt.ylabel(u'loss')
        plt.title('Model Loss ')
        plt.savefig(file_dir + "/loss.png")
        plt.close(1)
        # Accuracy curves (train vs val), saved to acc.png each epoch.
        fig2 = plt.figure(2)
        plt.plot(epoch_list, train_acc_list, 'r-', label=u'Train Acc')
        plt.plot(epoch_list, val_acc_list, 'b-', label=u'Val Acc')
        plt.legend(["Train Acc", "Val Acc"], loc="lower right")
        plt.title("Model Acc")
        plt.ylabel("acc")
        plt.xlabel("epoch")
        plt.savefig(file_dir + "/acc.png")
        plt.close(2)