import sys
sys.path.append('..')
from tkinter.tix import Tree
from typing_extensions import Self, assert_type
import torch
from torch.utils.data import DataLoader
import segmentation_models_pytorch as smp
from utils.helper import get_subdirs, remove_dataparallel
import os
import random
import argparse
import pandas as pd
import glob
from pathlib import Path
from tqdm import tqdm
import numpy as np
from sklearn.model_selection import train_test_split
from datasets import T7_Mask_Dataset, T2_Seg_Dataset, T2_Seg_Test_Dataset
# from datasets.transformations import multi_transforms_medium
from datasets.transformations import resize_transform_basic
import platform
import cv2
# import yaml
from PIL import Image
# import json
from sklearn.model_selection import train_test_split
from utils.helper import denormalization
from utils.helper import read_xml, find_nodes, change_node_text, indent, write_xml
# from albumentations import RandomCrop
# import matplotlib.pyplot as plt

# Pin this process to GPU index 3; must be set before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = '3'
# os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

def _str2bool(value):
    """Parse a command-line boolean string ('true'/'false', '1'/'0', ...)."""
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('1', 'true', 'yes', 'y')

def parse_args(argv=None):
    """
    Set args parameters.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
              Passing an explicit list makes the parser unit-testable.

    Returns:
        argparse.Namespace with all configuration values.
    """
    parser = argparse.ArgumentParser(description='Seg model turnon test.')
    parser.add_argument('--version', default='1.0', help='Data version.')
    # BUGFIX: type=bool turned any non-empty string (including 'False') into
    # True; _str2bool parses the text properly.
    parser.add_argument("--use_resize", type=_str2bool, default=True, choices=[True, False], help="Whether to resize input images")
    parser.add_argument("--img_height", type=int, default=1024, help="size of image height")
    parser.add_argument("--img_width", type=int, default=1024, help="size of image width")
    parser.add_argument("--mean", nargs='+', default=[0.485, 0.456, 0.406], help="Define the mean for image normalization.")
    parser.add_argument("--std", nargs='+', default=[0.229, 0.224, 0.225], help="Define the std for image normalization.")
    # BUGFIX: a missing comma made 'DeepLabV3Plus' 'PSPNet' concatenate into
    # the single bogus choice 'DeepLabV3PlusPSPNet', so PSPNet was unusable.
    parser.add_argument("--model", type=str, default='Unet', choices=['Unet', 'DeepLabV3Plus', 'PSPNet'], help="Define model name")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size for inference")
    parser.add_argument("--backbone", type=str, default='se_resnext50_32x4d', choices=['resnet50', 'se_resnext50_32x4d', 'mobilenet_v2', 'timm-mobilenetv3_small_100', 'resnet18'], help="Encoder backbone name")
    parser.add_argument("--img_dirs", nargs='+', default=['/data2/autorepair/ruanzhifeng/autorepair_t7_10/T6_Vtech/T643D02/data_accumulation'], help="Define the data location.")
    parser.add_argument("--ckpt_dir", type=str, default='/data2/autorepair/ruanzhifeng/autorepair_t7_10/code/adc_segmentation/ckpts/AA_T6_Vtech/0606_1611_Unet_se_resnext50_32x4d/Unet_se_resnext50_32x4d_best_iou.pth', help="Define where to save model checkpoints.")
    parser.add_argument("--save_dir", type=str, default='/data2/autorepair/ruanzhifeng/autorepair_t7_10/T6_Vtech/T643D02/data_accumulation_pred', help="Define where to save evaluate results.")

    parser.add_argument("--big_model_thr", type=float, default=0.5, help="Binarization threshold on the predicted probability map")
    parser.add_argument("--crop_size", type=int, default=100, help="Define the crop size")

    parser.add_argument("--use_gpu", type=_str2bool, default=True, help="Define whether use GPU")
    parser.add_argument("--no_defect_code", nargs='+', default=['OK-eleback-50X-Charm', 'OK-elefront-50X-Charm', 'OK', "TGXID"], help="Codes whose images are labelled defect-free.")
    parser.add_argument("--img_suffixs", nargs='+', default=['jpg', 'JPG', 'png', 'PNG', 'JPEG', 'jpeg'], help="Image file extensions to collect.")

    args = parser.parse_args(argv)

    return args

def random_crop(img, size):
    """Take a random ``size`` x ``size`` crop from an (H, W, C) image.

    Returns:
        (crop, y_min, x_min, y_max, x_max) — the copied window plus its
        bounding coordinates in the source image.
    """
    height, width = img.shape[:-1]  # drop the trailing channel dimension
    # Draw x first, then y (order matters for seeded reproducibility).
    left = random.randint(0, width - size)
    top = random.randint(0, height - size)

    window = img[top:top + size, left:left + size].copy()
    return window, top, left, top + size, left + size

def get_extensions(path):
    """Return the most common file extension under ``path`` (recursive walk).

    Extensions are taken as the text after the last '.' of each file name.
    Falls back to 'jpg' when the tree contains no files at all.
    """
    SplitTypes = []
    for walk_output in os.walk(path):
        for file_name in walk_output[-1]:  # walk yields (dirpath, dirnames, filenames)
            SplitTypes.append(file_name.split(".")[-1])
    extensions, counts = np.unique(SplitTypes, return_counts=True)

    # BUGFIX: the bare `except:` swallowed every error; only np.argmax on an
    # empty array (no files found) should trigger the 'jpg' fallback.
    try:
        return_ext = extensions[np.argmax(counts)]
    except ValueError:
        return_ext = 'jpg'

    return return_ext

def get_slice_bboxes(image_height: int,
        image_width: int,
        slice_height: int = 480,  # unused — kept for interface compatibility
        slice_width: int = 640,   # unused — kept for interface compatibility
        overlap_height_ratio: float = 0.2,  # BUGFIX: was annotated int; unused
        overlap_width_ratio: float = 0.2,   # BUGFIX: was annotated int; unused
):
    """Split an image into its four quadrants.

    Returns a list of four [ymin, ymax, xmin, xmax] boxes covering the
    top-left, top-right, bottom-left and bottom-right quarters.

    NOTE(review): for odd dimensions the last pixel row/column is dropped
    because the half sizes are floor-divided — confirm this is intended.
    """
    slice_bboxes = []
    half_h = int(image_height // 2)
    half_w = int(image_width // 2)
    for i in range(2):
        for j in range(2):
            slice_bboxes.append([i * half_h, (i + 1) * half_h, j * half_w, (j + 1) * half_w])
    return slice_bboxes

def normalize(img, mean, std, max_pixel_value=255.0):
    """Standardize an image: (img - mean * max_pixel_value) / (std * max_pixel_value).

    ``mean`` and ``std`` are per-channel sequences expressed in the [0, 1]
    range; the result is float32 and the input array is left untouched.
    """
    scaled_mean = np.array(mean, dtype=np.float32) * np.float32(max_pixel_value)
    scaled_std = np.array(std, dtype=np.float32) * np.float32(max_pixel_value)
    inv_std = np.reciprocal(scaled_std, dtype=np.float32)

    return (img.astype(np.float32) - scaled_mean) * inv_std

# def one_img_cut(img, mask):
#     h, w = img.shape[:2]
#     # mid_h = h//2
#     # mid_w = w//2

#     new_imgs, new_masks = [], []
#     # for dh, dw in [[0, 0], [0, 1], [1, 1], [1, 0]]:
#     #     new_imgs.append(img[dh*mid_h:(dh+1)*mid_h, dw*mid_w:(dw+1)*mid_w])
#     #     new_masks.append(mask[dh*mid_h:(dh+1)*mid_h, dw*mid_w:(dw+1)*mid_w])
    
#     x1, x2, x3, x4 = 0, 0, w - std_w, w - std_w  # for w
#     y1, y2, y3, y4 = 0, h - std_h, 0, h - std_h  # for h
#     for tx, ty in zip([x1, x2, x3, x4], [y1, y2, y3, y4]):
#         new_imgs.append(img[ty:ty+std_h, tx:tx+std_w])
#         new_masks.append(mask[ty:ty+std_h, tx:tx+std_w])

# def small_prepross_by_train_shape(args, img):
#     std_w, std_h = 864, 640
#     h, w = img.shape[:2]
#     x1, x2, x3, x4 = 0, 0, w - std_w, w - std_w  # for w
#     y1, y2, y3, y4 = 0, h - std_h, 0, h - std_h  # for h
#     patches = []
#     for tx, ty in zip([x1, x2, x3, x4], [y1, y2, y3, y4]):
#         patch = img[ty:ty+std_h, tx:tx+std_w]
#         patch = cv2.cvtColor(patch, cv2.COLOR_BGR2RGB)  # 转换颜色通道，这一步非常重要，用cv2读取图片是BGR通道，模型需要的输入时RGB通道
#         # patch = cv2.resize(patch, (args.img_width, args.img_height), interpolation=cv2.INTER_LINEAR)  # 转换图片大小，这一步也非常重要
#         patch = normalize(patch, args.mean, args.std, max_pixel_value=255.0)  # 归一化图片，这一步非常重要
#         patches.append(patch)
#         # new_imgs.append(img[ty:ty+std_h, tx:tx+std_w])
#         # new_masks.append(mask[ty:ty+std_h, tx:tx+std_w])
#     patches = np.array(patches)
#     patches = np.moveaxis(patches, -1, 1).astype(np.float32)
#     patches = torch.from_numpy(patches)
#     return patches

# def small_preprocess(args, img):
#     H, W = img.shape[:2]
#     patches = []
#     slice_bboxes = get_slice_bboxes(H, W)
#     for idx, bbox in enumerate(slice_bboxes):
#         ymin, ymax, xmin, xmax = bbox
#         patch = img[ymin: ymax, xmin: xmax, ...]
#         patch = cv2.cvtColor(patch, cv2.COLOR_BGR2RGB)  # 转换颜色通道，这一步非常重要，用cv2读取图片是BGR通道，模型需要的输入时RGB通道
#         patch = cv2.resize(patch, (args.img_width, args.img_height), interpolation=cv2.INTER_LINEAR)  # 转换图片大小，这一步也非常重要
#         patch = normalize(patch, args.mean, args.std, max_pixel_value=255.0)  # 归一化图片，这一步非常重要
#         patches.append(patch)
#     patches = np.array(patches)
#     patches = np.moveaxis(patches, -1, 1).astype(np.float32)
#     patches = torch.from_numpy(patches)
#     return patches

# def mask_merge(full_img_shape, masks):
#     full_h, full_w = full_img_shape[:2]
#     single_h, single_w = masks[0].shape[:2]
#     full_mask = np.zeros((full_img_shape[:2]))
#     x1, x2, x3, x4 = 0, 0, full_w - single_w, full_w - single_w  # for w
#     y1, y2, y3, y4 = 0, full_h - single_h, 0, full_h - single_h  # for h

#     for tx, ty, single_mask in zip([x1, x2, x3, x4], [y1, y2, y3, y4], masks):
#         full_mask[ty:ty+single_h, tx:tx+single_w] += single_mask
    
#     return full_mask.astype(np.uint8)

def main():
    """Run segmentation inference and export defect crops plus VOC-style XMLs.

    Workflow:
      1. Build a test dataframe from image directories (or csv listings).
      2. Run the smp segmentation model over every image.
      3. Threshold each predicted probability map into a binary mask, find
         contours, crop a context window around the largest defect, and save
         the crop, the mask and a Pascal-VOC style XML box.
      4. Collect and print per-code over-/missed-detection statistics.
    """
    args = parse_args()

    # make sure that the mean and std are float lists, not str lists
    # (argparse nargs='+' delivers strings when given on the command line)
    args.mean = [float(x) for x in args.mean]
    args.std = [float(x) for x in args.std]


    # Search Data and Mask dir to create dataframe for testing
    print('Loading the datasets ...')
    if os.path.isdir(args.img_dirs[0]):
        # BUGFIX: df_total was previously re-created inside this loop, so
        # only the last entry of args.img_dirs contributed any rows.
        df_total = pd.DataFrame()
        for img_path in args.img_dirs:
            code_name_list = os.listdir(img_path)
            for code_name in tqdm(code_name_list):
                data_dir = os.path.join(img_path, code_name)
                img_list = []
                for img_suffix in args.img_suffixs:
                    img_list.extend(sorted(glob.glob(os.path.join(data_dir, '*.'+img_suffix))))
                for img_cache in tqdm(img_list, f'{code_name}'):
                    try:
                        df_total_cache = pd.DataFrame()
                        df_total_cache['image'] = [img_cache]
                        df_total_cache['label'] = [code_name]
                        df_total = pd.concat([df_total, df_total_cache])
                    except IOError:
                        print('Image file %s has something wrong, will not used for train.' %img_cache)

        df_test = df_total
    # BUGFIX: args.img_dirs is a list, and os.path.isfile(list) raises
    # TypeError, so the csv branch could never be reached — test the first
    # entry instead (every entry is then read in the loop below).
    elif os.path.isfile(args.img_dirs[0]) and Path(args.img_dirs[0]).suffix == '.csv':
        df_test = pd.DataFrame()
        for img_path in args.img_dirs:
            df_tmp = pd.read_csv(img_path)
            df_test = pd.concat([df_test, df_tmp])
        df_test = df_test.reset_index(drop=True)

    else:
        raise ValueError('Img dir should be a dir or a csv file.')

    print(f'The data set shape is {df_test.shape}.')

    test_dataset = T2_Seg_Test_Dataset(df_test,
                                    resize_transform_basic(img_size=(args.img_height,args.img_width),
                                                            mean=args.mean,
                                                            std=args.std),
                                    )
    # pin_memory/few workers only when actually feeding a CUDA device on Linux
    kwargs = {'num_workers': 3, 'pin_memory': True} if (torch.cuda.is_available() and platform.system() == 'Linux' and args.use_gpu) else {'num_workers': 15}
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
    print('Model params has been loaded from %s.' %args.ckpt_dir)
    os.makedirs(args.save_dir, exist_ok=True)

    # Setup Seg Model
    model = getattr(smp, args.model)(args.backbone, encoder_weights=None, classes=1, activation=None)
    params = torch.load(args.ckpt_dir, map_location='cpu')
    model.load_state_dict(remove_dataparallel(params["state_dict"]))
    if torch.cuda.is_available() and args.use_gpu:
        model.cuda()
    model.eval()

    memo_check_big_no_defect = {}
    memo_check_small_no_defect = {}
    no_defect_num = 0  # number of images whose labelled code means "no defect"
    no_defect_detected_num = 0  # over-detections: defect predicted on a no-defect image
    df_miss_detect = pd.DataFrame()  # image paths and codes of missed detections

    base_tree = read_xml("./datasets/base_example.xml")
    root = base_tree.getroot()
    anno_tree = read_xml("./datasets/anno_example.xml")

    for itr, (images, codes, names, image_path) in enumerate(tqdm(test_loader)):
        if torch.cuda.is_available() and args.use_gpu:
            images = images.cuda()
        with torch.no_grad():
            outputs = model(images).float()
        result = torch.sigmoid(outputs)

        probability = result.detach().cpu().numpy()

        for i in range(len(codes)):
            code = codes[i]
            img_ori = cv2.imread(image_path[i], 1)

            # Binarize the probability map, then scale the mask back up to
            # the original image resolution.
            pred = np.squeeze(probability[i])
            mask = cv2.threshold(pred, args.big_model_thr, 1, cv2.THRESH_BINARY)[1]
            mask[mask > 0] = 1
            mask = cv2.resize(mask, (img_ori.shape[1], img_ori.shape[0]))

            tmp_contours, _ = cv2.findContours(mask.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            if len(tmp_contours) == 0:  # full-image pass detected no defect
                memo_check_big_no_defect[code] = memo_check_big_no_defect.get(code, 0) + 1

            img = images[i].cpu().numpy()
            img = denormalization(img, args.mean, args.std)

            boxed_out = os.path.join(args.save_dir, 'boxed_image', code)
            label_out = os.path.join(args.save_dir, 'boxed_label', code)
            pred_mask = os.path.join(args.save_dir, 'pred_mask', code)
            os.makedirs(boxed_out, exist_ok=True)
            os.makedirs(label_out, exist_ok=True)
            os.makedirs(pred_mask, exist_ok=True)

            h, w, c = img_ori.shape

            folder_node = find_nodes(base_tree, "folder")
            filename_node = find_nodes(base_tree, "filename")
            path_node = find_nodes(base_tree, "path")
            width_node = find_nodes(base_tree, "size/width")
            height_node = find_nodes(base_tree, "size/height")
            depth_node = find_nodes(base_tree, "size/depth")
            change_node_text(folder_node, code)
            # BUGFIX: index with i (was hard-coded 0) so batches larger than
            # one record the correct file name and path per image.
            change_node_text(filename_node, names[i] + Path(image_path[i]).suffix)
            change_node_text(path_node, image_path[i])
            change_node_text(width_node, str(w))
            change_node_text(height_node, str(h))
            change_node_text(depth_node, str(c))

            # Dilate so that nearby mask fragments merge into one contour.
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(7, 7))
            mask = cv2.dilate(mask,kernel)

            mask[mask == 1.0] = 128
            mask = mask.astype('uint8')

            contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            x_list = []
            y_list = []
            radius_list =[]

            if code in args.no_defect_code:  # no-defect code: any detection is an over-detection
                no_defect_num += 1
                if len(contours) != 0:
                    no_defect_detected_num += 1
            elif len(contours) == 0:  # defect code with nothing detected: record as a miss
                new_row = pd.DataFrame({'code': [code], 'img_path': [image_path[i]]})
                df_miss_detect = pd.concat([new_row, df_miss_detect])

            for cnt in contours:
                (x,y), radius = cv2.minEnclosingCircle(cnt)
                x_list.append(x)
                y_list.append(y)
                radius_list.append(radius)
            if len(radius_list) > 0:
                cv2.imwrite(os.path.join(pred_mask, names[i] + '.png'), mask)  # a defect was predicted, so save the mask
                index = np.argmax(radius_list)
                x, y, w, h = cv2.boundingRect(contours[index])

                # Grow a crop window around the largest defect so some
                # context is included, enforcing a minimum crop size and
                # widening very elongated boxes along their short axis.
                center_x, center_y = x + w / 2, y + h / 2
                half_w, half_h = w / 2, h / 2
                half_w, half_h = half_w + 10, half_h + 10
                if half_w <= args.crop_size//2 and half_h <= args.crop_size//2:
                    half_w, half_h = args.crop_size//2, args.crop_size//2
                elif max(half_w, half_h) <= 150:
                    half_w, half_h = 150, 150
                elif half_w / half_h >= 3:
                    half_h *= 2
                elif half_h / half_w >= 3:
                    half_w *= 2

                # Clamp the window to the image bounds.
                y_min = int(max(0, center_y - half_h))
                x_min = int(max(0, center_x - half_w))
                y_max = int(min(img_ori.shape[0], center_y + half_h))
                x_max = int(min(img_ori.shape[1], center_x + half_w))

                img_boxed = img_ori[y_min:y_max, x_min:x_max, ...]

                cv2.imwrite(os.path.join(boxed_out, names[i] + '.png'), img_boxed)

                xmin_node = find_nodes(anno_tree, "bndbox/xmin")
                ymin_node = find_nodes(anno_tree, "bndbox/ymin")
                xmax_node = find_nodes(anno_tree, "bndbox/xmax")
                ymax_node = find_nodes(anno_tree, "bndbox/ymax")
                change_node_text(xmin_node, str(x_min))
                change_node_text(ymin_node, str(y_min))
                change_node_text(xmax_node, str(x_max))
                change_node_text(ymax_node, str(y_max))
                # NOTE(review): the same anno_tree root object is appended to
                # (and later mutated in) base_tree on every detection, so each
                # written XML accumulates boxes from all previously processed
                # images, and all appended nodes share one set of coordinates.
                # A deepcopy of a fresh base tree per image is probably
                # intended — confirm before trusting these label files.
                root.append(anno_tree.getroot())
                indent(root)
                write_xml(base_tree, os.path.join(label_out, names[i] + '.xml'))

    print('memo big no defect: ', memo_check_big_no_defect)
    print('memo small no defect: ', memo_check_small_no_defect)
    print('no_defect_num: ', no_defect_num)
    print('no_defect_detected_num: ', no_defect_detected_num)

    df_miss_detect.to_csv(os.path.join(args.save_dir, 'miss_detect.csv'), index=False)

if __name__ == '__main__':
    main()
