import sys
sys.path.append('.')
sys.path.append('..')
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp
from utils.helper import get_subdirs, set_logger, GPUManager, remove_dataparallel
import os
import sys
import random
import argparse
import pandas as pd
import glob
from pathlib import Path
from tqdm import tqdm
import numpy as np
from datasets import T7_Mask_Dataset
from datasets.transformations import resize_transform_basic
import platform
import cv2
from PIL import Image
from utils.helper import denormalization
from utils.helper import read_xml, find_nodes, change_node_text, indent, write_xml

def parse_args():
    """
    Parse command-line arguments for the segmentation turn-on test.

    Returns:
        argparse.Namespace with all options defined below.
    """
    parser = argparse.ArgumentParser(description='Seg model turnon test.')
    parser.add_argument('--version', default='1.0', help='Data version.')
    parser.add_argument("--img_height", type=int, default=480, help="size of image height for image segmentation")
    parser.add_argument("--img_width", type=int, default=640, help="size of image width for image segmentation")
    # type=float so CLI-supplied values arrive as floats (the defaults already are;
    # downstream float() conversion in main() remains a harmless no-op).
    parser.add_argument("--mean", nargs='+', type=float, default=[0.485, 0.456, 0.406], help="Define the mean for image normalization.")
    parser.add_argument("--std", nargs='+', type=float, default=[0.229, 0.224, 0.225], help="Define the std for image normalization.")
    # BUG FIX: a missing comma ('DeepLabV3Plus' 'PSPNet') concatenated the two
    # strings into one invalid choice 'DeepLabV3PlusPSPNet', making it
    # impossible to select PSPNet from the command line.
    parser.add_argument("--model", type=str, default='Unet', choices=['Unet', 'DeepLabV3Plus', 'PSPNet'], help="Define model name")
    parser.add_argument("--backbone", type=str, default='se_resnext50', choices=['resnet50', 'se_resnext50', 'mobilenet_v2'], help="Define backbone name")
    parser.add_argument("--img_dir", type=str, default='/data5/chao/Datasets/65D02/test/classification/original/v1.0', help="Define the data location.")
    parser.add_argument("--ckpt_dir", type=str, default='ckpts', help="Define where to save model checkpoints.")
    parser.add_argument("--save_dir", type=str, default='/data5/chao/Datasets/65D02/test/classification', help="Define where to save evaluate results.")
    parser.add_argument("--csv_dir", type=str, default='./csv', help="Define the dir to store csv files.")
    # parser.add_argument("--val_ratio", type=float, default=0.2, help="Define the ratio of validation set.")
    parser.add_argument("--crop_size", type=int, default=224, help="Define the crop size")
    parser.add_argument("--max_crop_num_per_image", type=int, default=30, help="Define the max crop num per image")
    parser.add_argument("--crop_defect_edge_first", type=int, default=1, help="Define whether crop defect edge first")
    parser.add_argument("--gpu_num", type=int, default=1, help="Define how many gpus used to train this model")
    parser.add_argument("--no_defect_code", nargs='+', default=['TSFAS', 'TSDFS', 'NOCOD', 'NOCOD2', 'TFOL0', 'TSFIX'], help="Define codes treated as defect-free; their images get a random crop instead of defect-centered crops.")

    args = parser.parse_args()

    return args

def get_defect_center_in_grid(
                            defect_mask,
                            grid,
                            cal_grid_by_crop_size,
                            crop_size,
                            sort_descending_defect_ratio
                            ):
    """Find one crop candidate per (connected defect component, grid cell).

    Lays a grid over `defect_mask`, and for every connected defect component,
    computes the centroid of the component's pixels falling inside each grid
    cell.  A `crop_size` x `crop_size` window is placed around each centroid
    (shifted back inside the image at the borders) and the fraction of defect
    pixels inside that window is recorded.

    Args:
        defect_mask: 2-D array; non-zero pixels mark defects (assumes a
            binary-like mask -- TODO confirm expected values, caller passes 0/128).
        grid: [rows, cols] cell counts; ignored when `cal_grid_by_crop_size`
            is true.
        cal_grid_by_crop_size: if true, derive the grid so each cell is
            roughly `crop_size` pixels on a side.
        crop_size: side length of the square crop window, in pixels.
        sort_descending_defect_ratio: sort results by defect ratio,
            descending when true, ascending when false.

    Returns:
        List of [center_xy, component_label, defect_ratio,
        (x_min, y_min), (x_max, y_max)] entries, sorted by defect_ratio.
    """
    crop_area = crop_size**2
    # Optionally size the grid so each cell is ~crop_size pixels on a side.
    if cal_grid_by_crop_size:
        grid_0 = int(np.ceil(defect_mask.shape[0] / crop_size))
        grid_1 = int(np.ceil(defect_mask.shape[1] / crop_size))
        grid = [grid_0, grid_1]

    defect_mask = defect_mask.astype(np.uint8)
    # 4-connectivity labelling of defect components over the whole image.
    output = cv2.connectedComponentsWithStats(defect_mask, 4, cv2.CV_32S)
    num_labels = output[0]
    labels = output[1]
    # stats = output[2]
    # centroids = output[3]

    # calculate block width height
    block_height = int(defect_mask.shape[0] / grid[0])
    block_width = int(defect_mask.shape[1] / grid[1])
    h = crop_size
    w = crop_size

    list_defect_cen_ratio_bound = []
    for label_num in range(num_labels):
        each_defect = np.zeros((defect_mask.shape[0], defect_mask.shape[1]), np.uint8)

        if label_num > 0:  # label 0 is the background component
            each_defect[labels==label_num] = 255

            # divide it: scan the grid cells row by row, column by column
            from_x = 0
            from_y = 0
            for r in range(grid[0]):
                # print(r)
                to_y = from_y + block_height + 1
                for c in range(grid[1]):
                    to_x = from_x + block_width + 1
                    # Isolate this component's pixels that fall inside the
                    # current grid cell.
                    each_defect_tmp = np.zeros((defect_mask.shape[0], defect_mask.shape[1]), np.uint8)
                    each_defect_tmp[from_y:to_y, from_x:to_x] = each_defect[from_y:to_y, from_x:to_x].copy()

                    if np.sum(each_defect_tmp) != 0:
                        # t0 = time.time()
                        # Re-label just this cell to get the in-cell centroid.
                        # NOTE(review): output[3][1] takes the centroid of the
                        # first foreground piece only; if the component enters
                        # this cell as several disjoint pieces the others are
                        # ignored -- confirm this is intended.
                        output = cv2.connectedComponentsWithStats(each_defect_tmp, 8, cv2.CV_32S)
                        # t1 = time.time()
                        # total = t1-t0
                        # print("time used:" + str(total*1000) + "ms")

                        centroid = output[3][1]
                        center = (int(centroid[0]), int(centroid[1]))

                        # crop bound and readjust by at boundary
                        # (shift the window back so it lies fully inside the image)
                        x = center[0]
                        y = center[1]
                        if int(y-int(h/2)+1) < 0:
                            y = y + (0 - int(y-int(h/2)+1))
                        if int(x-int(w/2)+1) < 0:
                            x = x + (0 - int(x-int(w/2)+1))
                        if int(y+int(h/2)+1) > defect_mask.shape[0]:
                            y = y - (int(y+int(h/2)+1) - defect_mask.shape[0])
                        if int(x+int(w/2)+1) > defect_mask.shape[1]:
                            x = x - (int(x+int(w/2)+1) - defect_mask.shape[1])
                        y_min = int(y-int(h/2)+1)
                        x_min = int(x-int(w/2)+1)
                        y_max = int(y+int(h/2)+1)
                        x_max = int(x+int(w/2)+1)

                        # Fraction of this component's pixels inside the window.
                        each_defect_tmp2 = each_defect[y_min:y_max, x_min:x_max].copy()
                        defect_ratio = np.sum(each_defect_tmp2>0) / crop_area

                        list_defect_cen_ratio_bound.append([center, label_num, defect_ratio, (x_min, y_min), (x_max, y_max)])

                    from_x = to_x
                # each columns in grid
                from_y = to_y
                from_x = 0
            # each rows in grid

    # sort according to defect ratio
    if sort_descending_defect_ratio:
        list_defect_cen_ratio_bound = sorted(list_defect_cen_ratio_bound, key=lambda x: x[2], reverse=True)
    else:
        list_defect_cen_ratio_bound = sorted(list_defect_cen_ratio_bound, key=lambda x: x[2], reverse=False)

    return list_defect_cen_ratio_bound 


def random_crop(img, size):
    """Cut a random `size` x `size` patch out of `img`.

    Args:
        img: array of shape (H, W, C); last axis is channels.
        size: side length of the square crop.

    Returns:
        Tuple of (patch, y_min, x_min, y_max, x_max) where patch is a copy
        of img[y_min:y_max, x_min:x_max].
    """
    height, width = img.shape[:-1]  # drop the trailing channel axis
    # Pick the top-left corner uniformly; x is drawn first, then y.
    left = random.randint(0, width - size)
    top = random.randint(0, height - size)

    bottom, right = top + size, left + size
    patch = img[top:bottom, left:right].copy()

    return patch, top, left, bottom, right

def get_extensions(path):
    """Return the most common file extension found anywhere under `path`.

    Walks the tree rooted at `path`, tallies the text after the last '.' of
    every file name, and returns the extension with the highest count.

    Args:
        path: root directory to scan recursively.

    Returns:
        The most frequent extension (no leading dot); 'jpg' when the tree
        contains no files at all.
    """
    split_types = [
        file_name.split(".")[-1]
        for _, _, file_names in os.walk(path)
        for file_name in file_names
    ]

    # FIX: the original used a bare `except:` to paper over np.argmax failing
    # on an empty array; handle the empty case explicitly instead of
    # swallowing every possible exception.
    if not split_types:
        return 'jpg'

    extensions, counts = np.unique(split_types, return_counts=True)
    return extensions[np.argmax(counts)]

def main():
    """Run segmentation inference over a dataset and export, per image,
    defect-centered crops plus Pascal-VOC-style XML bounding-box files.

    Images whose code is in --no_defect_code get one random crop instead of
    defect-centered crops.  Outputs go under <save_dir>/boxed_image and
    <save_dir>/boxed_label.
    """
    args = parse_args()
    # choose free gpus to train
    gpu_list = ",".join([str(x) for x in GPUManager().auto_choice(gpu_num=args.gpu_num)])
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_list)

    # make sure that the mean and std are float list not str list
    args.mean = [float(x) for x in args.mean]
    args.std = [float(x) for x in args.std]
    # args.save_dir = os.path.join(args.save_dir, args.version)
    # Search Data and Mask dir to create dataframe for training
    print('Loading the datasets ...')
    if os.path.isdir(args.img_dir):
        # One sub-directory per defect code; gather every jpg/JPG/png inside.
        code_name_list = get_subdirs(args.img_dir)
        df_total = pd.DataFrame()
        for code_name in tqdm(code_name_list):
            data_dir = os.path.join(args.img_dir, code_name)
            img_list = sorted(glob.glob(os.path.join(data_dir, '*.jpg')))
            img_list.extend(sorted(glob.glob(os.path.join(data_dir, '*.JPG'))))
            img_list.extend(sorted(glob.glob(os.path.join(data_dir, '*.png'))))
            for img_cache in tqdm(img_list):
                try:
                    # Force-decode the file to weed out corrupt images early.
                    file_ = Image.open(img_cache).load()
                    df_total_cache = pd.DataFrame()
                    base_name = Path(img_cache).stem  # NOTE(review): unused
                    df_total_cache['image'] = [img_cache]
                    df_total_cache['label'] = [code_name]
                    df_total = pd.concat([df_total, df_total_cache])
                except IOError:
                    print('Image file %s has something wrong, will not used for train.' %img_cache)

        df_test = df_total
    elif os.path.isfile(args.img_dir) and Path(args.img_dir).suffix == '.csv':
        # A ready-made csv listing may be passed instead of a directory.
        df_test = pd.read_csv(args.img_dir)
    else:
        raise ValueError('Img dir should be a dir or a csv file.')

    print(f'The data set shape is {df_test.shape}.')

    test_dataset = T7_Mask_Dataset(df_test, 
                                    resize_transform_basic(img_size=(args.img_height,args.img_width),
                                                            mean=args.mean,
                                                            std=args.std)
                                    )
    # Workers/pinned memory only help with CUDA on Linux.
    kwargs = {'num_workers': 8, 'pin_memory': True} if (torch.cuda.is_available() and platform.system() == 'Linux') else {}
    # NOTE(review): shuffle=True on an evaluation loader is unusual -- confirm intended.
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True, **kwargs)
    print('Model params has been loaded from %s.' %args.ckpt_dir)
    # Setup Seg Model
    if args.backbone =='se_resnext50':
        args.backbone = 'se_resnext50_32x4d'  # canonical encoder name for smp
    model = getattr(smp, args.model)(args.backbone, encoder_weights=None, classes=1, activation=None)
    # pth_dir = glob.glob(os.path.join(args.ckpt_dir, '*.pth'))
    params = torch.load(args.ckpt_dir)
    # Strip 'module.' prefixes left over from DataParallel training.
    model.load_state_dict(remove_dataparallel(params["state_dict"]))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()

    # boxed_dir = '/data5/chao/Datasets/65D02-3853/classification/boxed_image/train'
    for itr, (images, codes, list_image_front_filename, image_path) in enumerate(tqdm(test_loader)):
        if torch.cuda.is_available():
            images = images.cuda()
        outputs = model(images).float()
        result = torch.sigmoid(outputs)  # single-channel defect probability map

        probability = result.detach().cpu().numpy()
        pred = np.squeeze(probability)
        # Binarize at 0.5.  NOTE(review): the mask is scaled to 128 here and
        # re-binarized to 1 a few lines below -- the scaling is redundant.
        mask = cv2.threshold(pred, 0.5, 1, cv2.THRESH_BINARY)[1]
        mask *= 128
        img = images[0].cpu().numpy()
        img = denormalization(img, args.mean, args.std)

        # Output dirs: <save_dir>/boxed_image/<version>/<code> and the
        # matching boxed_label tree for the XML files.
        boxed_out = os.path.join(os.path.join(args.save_dir,'boxed_image'), args.version + '/' + codes[0])
        label_out = os.path.join(os.path.join(args.save_dir,'boxed_label'), args.version + '/' + codes[0])
        os.makedirs(boxed_out, exist_ok=True)
        os.makedirs(label_out, exist_ok=True)
        img_ori = cv2.imread(image_path[0])
        h, w, c = img_ori.shape

        # Upscale the predicted mask back to original image resolution and
        # thicken it slightly so thin defects survive the crop selection.
        mask[mask > 0] = 1
        mask = cv2.resize(mask, (img_ori.shape[1], img_ori.shape[0]))
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(7, 7))
        mask = cv2.dilate(mask,kernel)

        mask[mask == 1.0] = 128
        mask = mask.astype('uint8')

        # get defect center of using grid
        # (ascending ratio puts defect-edge crops first; descending puts the
        # densest-defect crops first)
        if args.crop_defect_edge_first:
            sort_descending_defect_ratio = False
        else:
            sort_descending_defect_ratio = True

        list_defect_cen_ratio_bound = get_defect_center_in_grid(mask,
                                                                grid=[2,3],
                                                                cal_grid_by_crop_size=True,
                                                                crop_size=args.crop_size,
                                                                sort_descending_defect_ratio=sort_descending_defect_ratio)


        if codes[0] not in args.no_defect_code:
            # Defect image: emit up to max_crop_num_per_image crops, each
            # with a matching XML annotation built from the templates.
            crop_num_index = 0
            for grid_defect_properties in list_defect_cen_ratio_bound:
                base_tree = read_xml("./base_example.xml")
                root = base_tree.getroot()
                anno_tree = read_xml("./anno_example.xml")
                folder_node = find_nodes(base_tree, "folder")
                filename_node = find_nodes(base_tree, "filename")
                path_node = find_nodes(base_tree, "path")
                width_node = find_nodes(base_tree, "size/width")
                height_node = find_nodes(base_tree, "size/height")
                depth_node = find_nodes(base_tree, "size/depth")
                change_node_text(folder_node, codes[0])
                change_node_text(filename_node, list_image_front_filename[0] + Path(image_path[0]).suffix)
                change_node_text(path_node, image_path[0])
                change_node_text(width_node, str(w))
                change_node_text(height_node, str(h))
                change_node_text(depth_node, str(c))

                # NOTE(review): always true inside this for-loop -- the loop
                # body only runs when the list is non-empty.
                if len(list_defect_cen_ratio_bound) > 0:
                    # NOTE(review): this clobbers the image dims w/h used for
                    # the XML size fields above; from the 2nd crop onward,
                    # size/width and size/height record crop_size instead of
                    # the original image dimensions -- TODO confirm intended.
                    w = args.crop_size
                    h = args.crop_size

                    # crop bound and readjust by at boundary 
                    x_min = grid_defect_properties[3][0]
                    y_min = grid_defect_properties[3][1]
                    x_max = grid_defect_properties[4][0]
                    y_max = grid_defect_properties[4][1]

                    top_left = (x_min,y_min)
                    bottom_right = (x_max, y_max)

                    img_boxed = img_ori[y_min:y_max, x_min:x_max, ...]

                    # resize
                    img_boxed = cv2.resize(img_boxed, (args.crop_size, args.crop_size))

                    # assert img_boxed.shape==(args.crop_size,args.crop_size,3), f'The size is {img_boxed.shape}, x is {x} and y is {y}.'
                    save_boxed_path = os.path.join(boxed_out, list_image_front_filename[0] + f'_{top_left}_{bottom_right}' + '.png')
                    cv2.imwrite(save_boxed_path, img_boxed)

                    # save xml
                    xmin_node = find_nodes(anno_tree, "bndbox/xmin")
                    ymin_node = find_nodes(anno_tree, "bndbox/ymin")
                    xmax_node = find_nodes(anno_tree, "bndbox/xmax")
                    ymax_node = find_nodes(anno_tree, "bndbox/ymax")
                    change_node_text(xmin_node, str(x_min))
                    change_node_text(ymin_node, str(y_min))
                    change_node_text(xmax_node, str(x_max))
                    change_node_text(ymax_node, str(y_max))
                    root.append(anno_tree.getroot())
                    indent(root)
                    save_xml_path = os.path.join(label_out, list_image_front_filename[0] + f'_{top_left}_{bottom_right}' + '.xml')
                    write_xml(base_tree, save_xml_path)

                    crop_num_index += 1
                    if crop_num_index == args.max_crop_num_per_image:
                        break

        else:
            # No-defect code: write a single random crop + XML annotation.
            base_tree = read_xml("./base_example.xml")
            root = base_tree.getroot()
            anno_tree = read_xml("./anno_example.xml")
            folder_node = find_nodes(base_tree, "folder")
            filename_node = find_nodes(base_tree, "filename")
            path_node = find_nodes(base_tree, "path")
            width_node = find_nodes(base_tree, "size/width")
            height_node = find_nodes(base_tree, "size/height")
            depth_node = find_nodes(base_tree, "size/depth")
            change_node_text(folder_node, codes[0])
            change_node_text(filename_node, list_image_front_filename[0] + Path(image_path[0]).suffix)
            change_node_text(path_node, image_path[0])
            change_node_text(width_node, str(w))
            change_node_text(height_node, str(h))
            change_node_text(depth_node, str(c))

            # img_boxed = RandomCrop(224,224,p=1.0)(image=img_ori)['image']
            img_boxed, y_min, x_min, y_max, x_max = random_crop(img_ori, args.crop_size)
            # assert img_boxed.shape==(args.crop_size,args.crop_size,3), f'The size is {img_boxed.shape}, x is {x} and y is {y}.'
            cv2.imwrite(os.path.join(boxed_out, list_image_front_filename[0] + '.png'), img_boxed)

            xmin_node = find_nodes(anno_tree, "bndbox/xmin")
            ymin_node = find_nodes(anno_tree, "bndbox/ymin")
            xmax_node = find_nodes(anno_tree, "bndbox/xmax")
            ymax_node = find_nodes(anno_tree, "bndbox/ymax")
            change_node_text(xmin_node, str(x_min))
            change_node_text(ymin_node, str(y_min))
            change_node_text(xmax_node, str(x_max))
            change_node_text(ymax_node, str(y_max))
            root.append(anno_tree.getroot())
            indent(root)
            write_xml(base_tree, os.path.join(label_out, list_image_front_filename[0] + '.xml'))
                

# Script entry point: run inference + crop/annotation export.
if __name__ == '__main__':
    main()