import sys
sys.path.append('..')
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp
from utils.helper import get_subdirs, set_logger, GPUManager, remove_dataparallel
from utils.helper import Viz_HTML
from models.losses import BCEDiceLoss, DiceLoss
import os
import sys
import random
import argparse
import pandas as pd
import glob
import time
from pathlib import Path
from tqdm import tqdm
import numpy as np
from sklearn.model_selection import train_test_split
from datasets import T7_Mask_Dataset
from datasets.transformations import multi_transforms_medium
from datasets.transformations import resize_transform_basic
import platform
import cv2
import yaml
from PIL import Image
# import json
from sklearn.model_selection import train_test_split
from utils.helper import denormalization
from utils.helper import read_xml, find_nodes, change_node_text, indent, write_xml
from albumentations import RandomCrop
import matplotlib
import matplotlib.pyplot as plt

def parse_args():
    """Parse and return command-line arguments for the seg model turnon test.

    Returns:
        argparse.Namespace: parsed options (image size, normalization stats,
        model/backbone selection, data/checkpoint/output paths, crop size).
    """
    parser = argparse.ArgumentParser(description='Seg model turnon test.')
    parser.add_argument('--version', default='4.1', help='Data version.')
    parser.add_argument("--img_height", type=int, default=768, help="size of image height")
    parser.add_argument("--img_width", type=int, default=768, help="size of image width")
    # type=float so values passed on the command line arrive numeric, matching the defaults.
    parser.add_argument("--mean", nargs='+', type=float, default=[0.485, 0.456, 0.406], help="Define the mean for image normalization.")
    parser.add_argument("--std", nargs='+', type=float, default=[0.229, 0.224, 0.225], help="Define the std for image normalization.")
    # BUGFIX: the original choices list was missing a comma, so Python string
    # concatenation fused 'DeepLabV3Plus' 'PSPNet' into the single invalid
    # choice 'DeepLabV3PlusPSPNet', making both models unselectable.
    parser.add_argument("--model", type=str, default='Unet', choices=['Unet', 'DeepLabV3Plus', 'PSPNet'], help="Define model name")
    parser.add_argument("--backbone", type=str, default='se_resnext50', choices=['resnet50', 'se_resnext50', 'mobilenet_v2'], help="Define encoder backbone name")
    parser.add_argument("--img_dir", type=str, default='/data2/T7/33R0/classfication_train_data/33r0_clstrain_0705/65D02_33R0_v1.0_train', help="Define the data location.")
    parser.add_argument("--ckpt_dir", type=str, default='/data2/lihong/adc_models/65D02_33R0/git_code/65d02-38r0/project-root/depends/detection/v1.0/model.pth', help="Define where to save model checkpoints.")
    parser.add_argument("--save_dir", type=str, default='/data2/T7/33R0/classfication_train_data/', help="Define where to save evaluate results.")
    parser.add_argument("--csv_dir", type=str, default='./csv_lijia_v3', help="Define the dir to store csv files.")
    parser.add_argument("--crop_size", type=int, default=224, help="Define the crop size")
    parser.add_argument("--gpu_num", type=int, default=1, help="Define how many gpus used to train this model")
    parser.add_argument("--no_defect_code", nargs='+', default=['TSFAS', 'TSDFS', 'NOCOD', 'NOCOD2', 'TFOL0', 'TSFIX'], help="Codes treated as defect-free; such images get a random crop instead of a defect-centered crop.")

    args = parser.parse_args()

    return args

def get_extensions(path):
    """Return the most frequent file extension found under *path* (recursively).

    Extensions are taken as the text after the last '.' in each file name
    (a dotless name counts as its own "extension", as before). Falls back to
    'jpg' when the tree contains no files at all.

    Args:
        path: directory to walk.

    Returns:
        The most common extension string ('jpg' if no files were found).
    """
    split_types = []
    for _dirpath, _dirnames, filenames in os.walk(path):
        split_types.extend(name.split(".")[-1] for name in filenames)

    # Explicit empty check instead of the original bare `except:` around
    # argmax, which silently swallowed every possible error.
    if not split_types:
        return 'jpg'

    extensions, counts = np.unique(split_types, return_counts=True)
    return extensions[np.argmax(counts)]

def random_crop(img, crop_size):
    """Cut a random crop_size x crop_size window out of *img*.

    Args:
        img: HxWxC image array, at least crop_size in both spatial dims.
        crop_size: side length of the square crop.

    Returns:
        (crop, y_min, x_min, y_max, x_max) — the cropped array plus its
        bounding-box coordinates in the source image.

    NOTE(review): the original file called `random_crop` without ever defining
    or importing it, which raised NameError on the no-defect branch; this
    definition supplies the missing helper.
    """
    h, w = img.shape[:2]
    y_min = random.randint(0, h - crop_size)
    x_min = random.randint(0, w - crop_size)
    y_max = y_min + crop_size
    x_max = x_min + crop_size
    return img[y_min:y_max, x_min:x_max, ...], y_min, x_min, y_max, x_max


def _write_annotation(base_tree, anno_tree, root, label_out, name, x_min, y_min, x_max, y_max):
    """Fill the bndbox coords on *anno_tree*, attach it to *root*, write the xml."""
    change_node_text(find_nodes(anno_tree, "bndbox/xmin"), str(x_min))
    change_node_text(find_nodes(anno_tree, "bndbox/ymin"), str(y_min))
    change_node_text(find_nodes(anno_tree, "bndbox/xmax"), str(x_max))
    change_node_text(find_nodes(anno_tree, "bndbox/ymax"), str(y_max))
    root.append(anno_tree.getroot())
    indent(root)
    write_xml(base_tree, os.path.join(label_out, name + '.xml'))


def main():
    """Run segmentation inference over a dataset and export boxed crops.

    For each image: predict a binary defect mask, find the largest defect
    region (max min-enclosing-circle radius over contours), cut a fixed-size
    crop centered on it (shifted to stay inside the image), and write the crop
    (.png) plus a Pascal-VOC style .xml annotation. Images whose code is in
    --no_defect_code get a random crop instead.
    """
    args = parse_args()

    # Pin the process to automatically-chosen free GPUs.
    gpu_list = ",".join(str(x) for x in GPUManager().auto_choice(gpu_num=args.gpu_num))
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_list)

    # Make sure mean/std are float lists even if they arrived as strings.
    args.mean = [float(x) for x in args.mean]
    args.std = [float(x) for x in args.std]

    # Build the test dataframe either from a directory tree
    # (<img_dir>/<code>/<image>) or from a ready-made csv file.
    print('Loading the datasets ...')
    if os.path.isdir(args.img_dir):
        code_name_list = get_subdirs(args.img_dir)
        df_total = pd.DataFrame()
        for code_name in tqdm(code_name_list):
            data_dir = os.path.join(args.img_dir, code_name)
            img_list = sorted(glob.glob(os.path.join(data_dir, '*.jpg')))
            img_list.extend(sorted(glob.glob(os.path.join(data_dir, '*.JPG'))))
            img_list.extend(sorted(glob.glob(os.path.join(data_dir, '*.png'))))
            for img_cache in tqdm(img_list):
                try:
                    # Fully decode the image to weed out corrupt files up front.
                    Image.open(img_cache).load()
                    df_total_cache = pd.DataFrame()
                    df_total_cache['image'] = [img_cache]
                    df_total_cache['label'] = [code_name]
                    df_total = pd.concat([df_total, df_total_cache])
                except IOError:
                    print('Image file %s has something wrong, will not used for train.' %img_cache)
        df_test = df_total
    elif os.path.isfile(args.img_dir) and Path(args.img_dir).suffix == '.csv':
        df_test = pd.read_csv(args.img_dir)
    else:
        raise ValueError('Img dir should be a dir or a csv file.')

    print(f'The data set shape is {df_test.shape}.')

    test_dataset = T7_Mask_Dataset(
        df_test,
        resize_transform_basic(img_size=(args.img_height, args.img_width),
                               mean=args.mean,
                               std=args.std))
    # Worker processes / pinned memory only help on a CUDA-capable Linux box.
    kwargs = {'num_workers': 8, 'pin_memory': True} if (torch.cuda.is_available() and platform.system() == 'Linux') else {}
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True, **kwargs)
    print('Model params has been loaded from %s.' %args.ckpt_dir)

    # Build the segmentation model and restore checkpoint weights.
    if args.backbone == 'se_resnext50':
        args.backbone = 'se_resnext50_32x4d'  # smp's full encoder name
    model = getattr(smp, args.model)(args.backbone, encoder_weights=None, classes=1, activation=None)
    params = torch.load(args.ckpt_dir)
    model.load_state_dict(remove_dataparallel(params["state_dict"]))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()

    for itr, (images, codes, names, image_path) in enumerate(tqdm(test_loader)):
        # Fresh XML templates per image: node texts are mutated below and the
        # annotation subtree gets appended into the base tree.
        base_tree = read_xml("./base_example.xml")
        root = base_tree.getroot()
        anno_tree = read_xml("./anno_example.xml")

        if torch.cuda.is_available():
            images = images.cuda()
        # Inference only — skip autograd bookkeeping.
        with torch.no_grad():
            outputs = model(images).float()
        result = torch.sigmoid(outputs)

        probability = result.detach().cpu().numpy()
        pred = np.squeeze(probability)
        # Binarize probabilities at 0.5: mask holds 0.0 / 1.0.
        mask = cv2.threshold(pred, 0.5, 1, cv2.THRESH_BINARY)[1]

        boxed_out = os.path.join(args.save_dir, 'boxed_image', args.version + '/' + codes[0])
        label_out = os.path.join(args.save_dir, 'boxed_label', args.version + '/' + codes[0])
        os.makedirs(boxed_out, exist_ok=True)
        os.makedirs(label_out, exist_ok=True)
        img_ori = cv2.imread(image_path[0])
        h, w, c = img_ori.shape

        # Fill the VOC header fields of the annotation file.
        change_node_text(find_nodes(base_tree, "folder"), codes[0])
        change_node_text(find_nodes(base_tree, "filename"), names[0] + Path(image_path[0]).suffix)
        change_node_text(find_nodes(base_tree, "path"), image_path[0])
        change_node_text(find_nodes(base_tree, "size/width"), str(w))
        change_node_text(find_nodes(base_tree, "size/height"), str(h))
        change_node_text(find_nodes(base_tree, "size/depth"), str(c))

        # Upscale the mask to the original resolution and thicken thin blobs
        # so small defects survive the resize.
        mask[mask > 0] = 1
        mask = cv2.resize(mask, (img_ori.shape[1], img_ori.shape[0]))
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        mask = cv2.dilate(mask, kernel)
        mask[mask == 1.0] = 128
        mask = mask.astype('uint8')
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        if codes[0] not in args.no_defect_code:
            # Defect image: crop around the region with the largest
            # min-enclosing-circle radius.
            x_list = []
            y_list = []
            radius_list = []
            for cnt in contours:
                (x, y), radius = cv2.minEnclosingCircle(cnt)
                x_list.append(x)
                y_list.append(y)
                radius_list.append(radius)
            if len(radius_list) > 0:
                index = np.argmax(radius_list)
                half = int(args.crop_size / 2)
                x = int(x_list[index])
                y = int(y_list[index])
                # Shift the crop center so the window stays fully inside the image.
                if y - half + 1 < 0:
                    y = y + (0 - (y - half + 1))
                if x - half + 1 < 0:
                    x = x + (0 - (x - half + 1))
                if y + half + 1 > img_ori.shape[0]:
                    y = y - ((y + half + 1) - img_ori.shape[0])
                if x + half + 1 > img_ori.shape[1]:
                    x = x - ((x + half + 1) - img_ori.shape[1])
                y_min = y - half + 1
                x_min = x - half + 1
                y_max = y + half + 1
                x_max = x + half + 1

                img_boxed = img_ori[y_min:y_max, x_min:x_max, ...]
                assert img_boxed.shape==(args.crop_size,args.crop_size,3), f'The size is {img_boxed.shape}, x is {x} and y is {y}.'
                cv2.imwrite(os.path.join(boxed_out, names[0] + '.png'), img_boxed)
                _write_annotation(base_tree, anno_tree, root, label_out, names[0],
                                  x_min, y_min, x_max, y_max)
        else:
            # Known defect-free code: take a random crop instead.
            img_boxed, y_min, x_min, y_max, x_max = random_crop(img_ori, args.crop_size)
            # BUGFIX: original assert message referenced x/y, which are unbound
            # (or stale from a previous iteration) on this branch.
            assert img_boxed.shape==(args.crop_size,args.crop_size,3), f'The size is {img_boxed.shape}, box is ({x_min}, {y_min}, {x_max}, {y_max}).'
            cv2.imwrite(os.path.join(boxed_out, names[0] + '.png'), img_boxed)
            _write_annotation(base_tree, anno_tree, root, label_out, names[0],
                              x_min, y_min, x_max, y_max)

# Script entry point: run the crop/annotation export pipeline.
if __name__ == '__main__':
    main()