import os
from PIL import Image
import torch
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import torch.backends.cudnn
import torchvision.transforms as transforms
from timm.utils import accuracy, AverageMeter
from sklearn.metrics import classification_report
import h5py

class Mydataset(torch.utils.data.Dataset):
    """Dataset of paired class/defect/reference images stored in .h5 files.

    Each file at ``img_paths[i]`` is expected to contain the HDF5 datasets
    ``class_tiff``, ``defect_tiff`` and ``reference_tiff`` (2-D image arrays).
    """

    def __init__(self, img_paths, labels, transform_class, transform_defect):
        # img_paths: list of .h5 file paths; labels: parallel list of class ids.
        self.img_paths = img_paths
        self.labels = labels
        self.transform_class = transform_class    # applied to the class image
        self.transform_defect = transform_defect  # applied to defect AND reference images

    def __getitem__(self, index):
        """Load sample ``index`` and return a dict of transformed tensors plus label."""
        img_path = self.img_paths[index]
        label = self.labels[index]
        # Fixed: context manager guarantees the HDF5 handle is closed even if
        # one of the reads raises (the original open/close pair could leak).
        with h5py.File(img_path, 'r') as f:
            class_tiff = f['class_tiff'][:]
            defect_tiff = f['defect_tiff'][:]
            reference_tiff = f['reference_tiff'][:]

        class_tiff = Image.fromarray(class_tiff)
        defect_tiff = Image.fromarray(defect_tiff)
        reference_tiff = Image.fromarray(reference_tiff)

        if self.transform_class is not None:
            class_tiff = self.transform_class(class_tiff)

        if self.transform_defect is not None:
            # Defect and reference images share the same preprocessing.
            defect_tiff = self.transform_defect(defect_tiff)
            reference_tiff = self.transform_defect(reference_tiff)

        sample = {'class_tiff': class_tiff, 'defect_tiff': defect_tiff, 'reference_tiff': reference_tiff, 'label': label}
        return sample

    def __len__(self):
        """Number of samples (one per .h5 path)."""
        return len(self.img_paths)

# Validation loop
@torch.no_grad()
def val(model, device, test_loader, epoch, best_model):
    """Run one validation pass over ``test_loader``.

    Returns (true labels, predicted labels, avg loss, top-1 accuracy, best_model).
    NOTE(review): no criterion is applied here, so ``loss_meter`` is never
    updated and the printed "Average loss" is always 0 — confirm whether a
    loss should be computed (a criterion would have to be passed in).
    """
    model.eval()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()
    total_num = len(test_loader.dataset)
    print(total_num, len(test_loader))
    val_list = []
    pred_list = []
    for sample in test_loader:
        class_image = sample['class_tiff'].to(device, non_blocking=True)
        defect_image = sample['defect_tiff'].to(device, non_blocking=True)
        # reference_tiff is loaded by the dataset but not consumed by this
        # model, so it is no longer copied to the device.
        target = sample['label']
        val_list.extend(int(t) for t in target)
        target = target.to(device, non_blocking=True)
        output = model(class_image, defect_image)
        # Fixed: the original called output.to(device) and discarded the
        # result (a no-op); the model output is already on `device`.
        _, pred = torch.max(output.data, 1)
        pred_list.extend(int(p) for p in pred)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        acc1_meter.update(acc1.item(), target.size(0))
        acc5_meter.update(acc5.item(), target.size(0))
    acc = acc1_meter.avg
    print('\nVal set: Average loss: {:.4f}\tAcc1:{:.3f}%\tAcc5:{:.3f}%\n'.format(
        loss_meter.avg,  acc,  acc5_meter.avg))
    return val_list, pred_list, loss_meter.avg, acc, best_model

def predictDir2(weight):
    """Evaluate checkpoint ``weight`` on the fixed validation directory.

    Runs a full validation pass (batch inference) and prints sklearn's
    per-class classification report.
    """
    categories = ['ADI_particle_developed', 'Array_peeling', 'Cu_missing', 'Other_peeling', 'Partial_etch',
                  'Pattern_fail',
                  'PR_peeling', 'Seam', 'Reference', 'Surface_particle', 'Burried_particle', 'Cu_diffuse',
                  'Prelayer_defect_developed',
                  'Void', 'Residue', 'Scratch']
    categories_to_id = {c: i for i, c in enumerate(categories)}

    BATCH_SIZE = 64
    DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    use_dp = False  # enable DataParallel multi-GPU inference
    best_model = ''

    # Normalization statistics for the single-channel class image.
    transform_test = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.40272543], std=[0.13901867])
    ])
    # Normalization statistics for the defect/reference images.
    resize_defect_ref_test = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.402817], std=[0.14428316])
    ])

    def get_imgs_labels(root_dir):
        """Collect (.h5 path, label id) pairs from <root_dir>/<category>/*.h5."""
        imgs = []
        labels = []
        # Renamed loop variable: the original shadowed the outer `categories`.
        for category in os.listdir(root_dir):
            category_path = os.path.join(root_dir, category)
            for img in os.listdir(category_path):
                imgs.append(os.path.join(category_path, img))
                labels.append(categories_to_id[category])
        return imgs, labels

    val_imgs, val_labels = get_imgs_labels(r'/home/yeadc/Documents/cxj/datasets_h5_v2/val')
    dataset_test = Mydataset(val_imgs, val_labels, transform_test, resize_defect_ref_test)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)

    # Fixed: map_location makes a GPU-saved checkpoint loadable on CPU-only
    # hosts. NOTE(review): torch.load unpickles arbitrary code — only load
    # trusted checkpoints.
    model = torch.load(weight, map_location=DEVICE)
    model.eval()
    model.to(device=DEVICE)

    if use_dp and torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = torch.nn.DataParallel(model)

    # Single evaluation pass (the original for-loop ran exactly once).
    val_list, pred_list, val_loss, val_acc, best_model = val(model, DEVICE, test_loader, 0, best_model)
    print(classification_report(val_list, pred_list, target_names=categories))



def predictSingleImage(imagePath, weight):
    """Classify one .h5 sample (fov1/class + fov2/defect images).

    imagePath: path to an .h5 file with 'class_tiff' and 'defect_tiff' datasets.
    weight: path to a whole-model checkpoint saved via torch.save(model, ...).
    Returns the predicted defect category name.
    """
    categories = ['ADI_particle_developed', 'Array_peeling', 'Cu_missing', 'Other_peeling', 'Partial_etch',
                  'Pattern_fail',
                  'PR_peeling', 'Seam', 'Reference', 'Surface_particle', 'Burried_particle', 'Cu_diffuse',
                  'Prelayer_defect_developed',
                  'Void', 'Residue', 'Scratch']
    print("============= input h5file and model weight ============")

    print(f"h5file path:{imagePath}")
    print(f"weight path:{weight}")
    categories_to_id = dict((c, i) for i, c in enumerate(categories))
    id_to_categories = dict((v, k) for k, v in categories_to_id.items())

    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Fixed: map_location makes a GPU-saved checkpoint loadable on CPU-only
    # hosts. NOTE(review): torch.load unpickles arbitrary code — only load
    # trusted checkpoints.
    model = torch.load(weight, map_location=DEVICE)
    model.eval()
    model.to(DEVICE)

    # Same normalization used for class images at training time.
    transform_test = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.40272543], std=[0.13901867])
        ])
    # Fixed: context manager closes the HDF5 handle even if a read raises.
    with h5py.File(imagePath, 'r') as f:
        class_tiff = f['class_tiff'][:]
        defect_tiff = f['defect_tiff'][:]
    print("============== Fov Images Preprocessing ================")
    print("Fov1 Image Before Preprocess:")
    print(class_tiff.shape)
    print("Fov2 Image Before Preprocess:")
    print(defect_tiff.shape)

    image_class = Image.fromarray(class_tiff).convert("L")
    image_class = transform_test(image_class)
    image_class = torch.unsqueeze(image_class, dim=0).to(DEVICE)

    image_defect = Image.fromarray(defect_tiff).convert("L")
    image_defect = transform_test(image_defect)
    image_defect = torch.unsqueeze(image_defect, dim=0).to(DEVICE)

    print("Fov1 Image After Preprocess:")
    print(image_class.shape)
    print("Fov2 Image After Preprocess:")
    print(image_defect.shape)

    # Predict
    print("==================== Inference =========================")
    # Fixed: inference under no_grad avoids building a useless autograd graph.
    with torch.no_grad():
        out = model(image_class, image_defect)
    print(f"model output:{out}")
    _, pred = torch.max(out.data, 1)
    print(f"defect id:{pred}")
    res = id_to_categories[pred.data.item()]  # defect name
    print(f"defect name:{res}")
    return res

def getStatedict(origin, output):
    """Convert a whole-model checkpoint into a state_dict-only checkpoint.

    origin: path to a file saved via torch.save(model, ...).
    output: destination path; receives torch.save(model.state_dict(), ...).
    """
    # Fixed: map_location='cpu' lets a GPU-saved model be converted on
    # CPU-only hosts; weights_only=False is required on torch>=2.6 to
    # unpickle a full nn.Module (only use on trusted checkpoints).
    model = torch.load(origin, map_location='cpu', weights_only=False)
    torch.save(model.state_dict(), output)

def predictTestDIr(testDirPath, weight):
    """Evaluate per-sample on <testDirPath>/<category>/*.h5.

    Prints per-category and overall accuracy (directory name is the
    ground-truth category).
    """
    categories = ['ADI_particle_developed', 'Array_peeling', 'Cu_missing', 'Other_peeling', 'Partial_etch',
                  'Pattern_fail',
                  'PR_peeling', 'Seam', 'Reference', 'Surface_particle', 'Burried_particle', 'Cu_diffuse',
                  'Prelayer_defect_developed',
                  'Void', 'Residue', 'Scratch']
    categories_to_id = dict((c, i) for i, c in enumerate(categories))
    id_to_categories = dict((v, k) for k, v in categories_to_id.items())

    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Fixed: map_location makes a GPU-saved checkpoint loadable on CPU-only
    # hosts. NOTE(review): torch.load unpickles arbitrary code — only load
    # trusted checkpoints.
    model = torch.load(weight, map_location=DEVICE)
    model.eval()
    model.to(DEVICE)

    # Same normalization used for class images at training time.
    transform_test = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.40272543], std=[0.13901867])
        ])

    count = 0
    right = 0

    for category in os.listdir(testDirPath):
        file_lists = os.listdir(os.path.join(testDirPath, category))
        category_count = 0
        category_right = 0
        for file in file_lists:
            count += 1
            category_count += 1
            imagePath = os.path.join(testDirPath, category, file)
            # Fixed: context manager closes the HDF5 handle even on errors.
            with h5py.File(imagePath, 'r') as f:
                class_tiff = f['class_tiff'][:]
                defect_tiff = f['defect_tiff'][:]

            image_class = transform_test(Image.fromarray(class_tiff).convert("L"))
            image_class = torch.unsqueeze(image_class, dim=0).to(DEVICE)

            image_defect = transform_test(Image.fromarray(defect_tiff).convert("L"))
            image_defect = torch.unsqueeze(image_defect, dim=0).to(DEVICE)

            # Fixed: inference under no_grad avoids building an autograd graph.
            with torch.no_grad():
                out = model(image_class, image_defect)
            _, pred = torch.max(out.data, 1)
            res = id_to_categories[pred.data.item()]  # defect name

            if res == category:
                right += 1
                category_right += 1
        # Fixed: guard against an empty category directory (ZeroDivisionError).
        cat_acc = category_right / category_count if category_count else 0.0
        print("{}:{} \n acc:{}".format(category, category_count, cat_acc))
        print("======================================================================")
    total_acc = right / count if count else 0.0
    print("total:{} \n acc:{}".format(count, total_acc))




if __name__ == '__main__':
    # Smoke-test the classifier on a single sample file.
    h5_sample = "Residue_FY-A100_ALPA_ASI_lFFE160814.18_w12_462_Topography3.h5"
    checkpoint = 'model_166_90.576.pth'
    # testDir = "/home/yeadc/Documents/cxj/datasets_h5_v2/val"

    # Alternative entry points:
    # predictTestDIr(testDir, checkpoint)
    # predictDir2(checkpoint)
    predictSingleImage(h5_sample, checkpoint)
