import os
import json
import sys
from argparse import ArgumentParser

import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from skimage.feature import corner_peaks, corner_harris

from osgeo import gdal
import rasterio

from utils.preprocess import bilatera_blur

from tools.dataset_helper import open_img_np

if __name__ == "__main__":
    from spn_helper import (ChangePointDict, FillNameID, dict_to_RGB_plotimg,
        open_spn_as_np)
    from sen12_helper import (open_sen12_as_np, json_helper)
else:
    from .spn_helper import (ChangePointDict, FillNameID, dict_to_RGB_plotimg,
        open_spn_as_np)
    from .sen12_helper import (open_sen12_as_np, json_helper)

def find_keypoints(
    img, mask_img=None, *, scheme="SURF", radius=None, 
    nodata_border=32,  exclude_border=32, threshold_abs=1e-2
):
    '''
    Detect corner/keypoint coordinates in an image.

    Param:
    -----
    img: single-channel 8-bit input image
    mask_img: optional validity mask (nonzero = valid data). When given,
        keypoints within `nodata_border` pixels of nodata regions are dropped;
        when None, the filtering step is skipped entirely.
    scheme: one of "SURF"/"SIFT"/"BRISK"/"ORB"/"HARRIS"; HARRIS uses skimage,
        the others use OpenCV
    radius: minimum pixel distance between Harris response peaks (HARRIS only)
    exclude_border: do not search for corners within this many pixels of the
        image border (HARRIS only)
    nodata_border: do not keep corners within this many pixels of nodata areas
    threshold_abs: absolute Harris response threshold (HARRIS only)

    Return:
    -----
    list of cv.KeyPoint objects; for the HARRIS scheme only the coordinates
    and the response value are meaningful

    Raises:
    -----
    ValueError: if `scheme` is not a supported name
    '''
    if scheme == "SURF":
        detector = cv.xfeatures2d.SURF_create(
            hessianThreshold=400, nOctaves=4, nOctaveLayers=3,
            extended=False, upright=True)
    elif scheme == "SIFT":
        detector = cv.xfeatures2d.SIFT_create(nOctaveLayers=3, sigma=1.3)
        # detector = cv.SIFT_create(nOctaveLayers=3, sigma=1.3)
    elif scheme == "BRISK":
        detector = cv.BRISK_create(thresh=30, octaves=3)
    elif scheme == "ORB":
        detector = cv.ORB_create(nfeatures=10000)
    elif scheme != "HARRIS":
        # Fail fast with a clear message instead of a NameError on `detector`.
        raise ValueError(f"unknown scheme: {scheme}")

    if scheme != "HARRIS":
        kpts = detector.detect(img, None)
    else:
        kwargs = {}
        if radius is not None:
            kwargs['min_distance'] = radius
        kwargs['exclude_border'] = exclude_border
        kwargs['threshold_abs'] = threshold_abs
        kwargs['threshold_rel'] = 0
        heatmap = corner_harris(img)
        # corner_peaks returns (row, col) pairs
        cnrs = corner_peaks(heatmap, **kwargs)
        kpts = [cv.KeyPoint(
            float(rc[1]), float(rc[0]), 1, -1, heatmap[rc[0], rc[1]])
            for rc in cnrs]

    # Remove keypoints within nodata_border pixels of the nodata edge.
    # Previously this step crashed with mask_img=None (e.g. from drawMain);
    # it is now skipped when no mask is supplied.
    if mask_img is not None:
        valid_mask = cv.morphologyEx(
            mask_img, op=cv.MORPH_ERODE,
            kernel=np.ones((nodata_border*2+1, nodata_border*2+1))
        ).astype(np.uint8)
        kpts = [pt for pt in kpts
                if valid_mask[int(pt.pt[1]), int(pt.pt[0])] > 0]

    return kpts

def cvpoints_to_dict(
    cvpoints: list, channel: str='', modal: str='',  scheme: str=''
) -> dict:
    '''
    Convert a batch of cv.KeyPoint objects into dictionary form.

    Param:
    -----
    cvpoints: list of cv.KeyPoint objects
    channel: channel the keypoints belong to ('B', 'G', 'R', 'NIR', 'mean', ...)
    modal: modality name (matches the SpaceNet folder name)
    scheme: name of the keypoint algorithm

    Return:
    -----
    {"POINT (x y)": {
                "xy": (x, y),
                "size": keypoint size,
                "response": keypoint response,
                "channel": channel label,
                "modal": modal,
                "scheme": scheme,
            }, ...}
    When two keypoints land on the same integer pixel, the one with the
    larger response wins.
    '''
    keypoints = {}
    for kpt in tqdm(cvpoints):
        xi, yi = int(kpt.pt[0]), int(kpt.pt[1])
        key = f"POINT ({xi} {yi})"

        current = keypoints.get(key)
        if current is None or current["response"] < kpt.response:
            keypoints[key] = {
                "xy": (xi, yi),
                "size": kpt.size,
                "response": kpt.response,
                "channel": channel,
                "modal": modal,
                "scheme": scheme,
            }

    return keypoints

def dictpoint_to_world(kpts, tif, inplace=False, add=False):
    '''
    Convert image coordinates to world coordinates.

    Param:
    -----
    kpts: dict keyed by "(x, y)" whose values are dicts containing an
        "xy" coordinate pair
    tif: rasterio dataset object the keypoints belong to
    inplace: if True, replace each keypoint's "xy" value with the world
        coordinates
    add: if True, store the world coordinates under an extra "word_xy" key
        (NOTE(review): "word_xy" looks like a typo for "world_xy" — kept
        unchanged for compatibility with existing consumers); mutually
        exclusive with `inplace`

    Return:
    -----
    list of [lon, lat] world coordinates (empty when inplace/add is used)
    '''
    world_coords = []
    for attrs in tqdm(kpts.values()):
        col, row = attrs['xy']
        lon, lat = tif.xy(row, col)  # image (row, col) -> world coordinates

        if inplace:
            attrs["xy"] = (lon, lat)
        elif add:
            attrs["word_xy"] = (lon, lat)
        else:
            world_coords.append([lon, lat])

    return world_coords

def cvpoints_to_xy(cvpoints: list) -> list:
    '''
    Extract the coordinates from cv.KeyPoint objects as a list of
    coordinate pairs.

    Param:
    -----
    cvpoints: list of KeyPoint objects to convert

    Return:
    -----
    list of [x, y] pairs
    '''
    return [[kp.pt[0], kp.pt[1]] for kp in cvpoints]

# Pseudo non-maximum suppression: keep only the strongest keypoint within
# a radius of r pixels.
def keypoints_nms(img_shape, keypoints, r=64):
    '''
    Suppress keypoints so that surviving peaks are at least r pixels apart.

    Param:
    -----
    img_shape: (height, width) shape used to build the response map
    keypoints: dict keyed by "POINT (x y)" with "xy" and "response" entries
    r: minimum pixel distance between retained peaks

    Return:
    -----
    dict with the same structure as `keypoints`, restricted to the peaks
    '''
    # Scatter each keypoint's response onto a dense map.
    response_map = np.zeros(img_shape).astype(np.float64)
    for attrs in keypoints.values():
        x, y = attrs["xy"]
        response_map[y, x] = attrs["response"]

    # Find peaks in the response map with at least r pixels of separation.
    peaks = corner_peaks(response_map, min_distance=r, exclude_border=True)

    # Rebuild the keypoint dict from the surviving peak coordinates
    # (corner_peaks yields (row, col), so col is x and row is y).
    return {
        f"POINT ({col} {row})": keypoints[f"POINT ({col} {row})"]
        for row, col in peaks
    }

def get_crn__(
    file_list, save_dir, *, type_='RGB', modal='', scheme='HARRIS', 
    mean=True, border_radius=0
):
    '''
    Extract corner points for every file in the list and save them.

    Param:
    -----
    file_list: list of files to extract keypoints from
    save_dir: path to save the image-to-keypoints dict, e.g. "./a.json"
    type_: image type ('RGB'/'NIR'/'SAR')
    modal: modality info ('MS'/'SAR-Intensity'/'PS-RGB')
    scheme: keypoint algorithm, same options as find_keypoints
    mean: whether to also extract keypoints from the per-pixel channel mean
    border_radius: no keypoints within this many pixels of nodata/image border

    Return:
    -----
    dict mapping image identifier -> keypoint dict (also written to save_dir)

    Raises:
    -----
    ValueError: if `type_` is not one of the supported types
    '''

    if type_ == 'RGB':
        ch_info = ['B', 'G', 'R']
        img_modal = 'MS'
        threshold_abs = 0.16
    elif type_ == 'NIR':
        ch_info = ['NIR']
        img_modal = 'MS'
        # Bug fix: threshold_abs was never set for NIR, causing an
        # UnboundLocalError in find_keypoints. Reuse the MS/RGB value
        # since NIR shares the MS modality — TODO confirm tuning.
        threshold_abs = 0.16
    elif type_ == 'SAR':
        ch_info = ['SAR0', 'SAR1', 'SAR2', 'SAR3']
        img_modal = 'SAR-Intensity'
        threshold_abs = 5e-3
    else:
        raise ValueError(f"wrong type_: {type_}! stupid ass !")

    if modal != '':
        img_modal = modal

    if mean:
        ch_info.append('mean')

    # image -> keypoint-attributes dict
    img_pt_dict = {}
    for file_name in file_list:
        img_np, tif = open_spn_as_np(
            file_name, type=type_, dset_obj=True, modal=img_modal
        )
        clac_img = img_np
        if mean:
            clac_img = np.concatenate((clac_img, 
                clac_img.mean(axis=2, keepdims=True).astype(np.uint8))
                , axis=2)

        # keypoint-attributes dict
        keypoints = {}
        ch_num = clac_img.shape[2]
        size = clac_img.shape[1::-1]
        mask_img = tif.read_masks(1)
        # Compute keypoints channel by channel.
        for ch_ids, ch in zip(range(ch_num), ch_info):
            cp_img = clac_img[:, :, ch_ids].copy()
            keypts = find_keypoints(
                cp_img, mask_img, scheme=scheme, nodata_border=border_radius,
                exclude_border=border_radius,
                threshold_abs=threshold_abs
            )
            tmp_kpt = cvpoints_to_dict(keypts, channel=ch, modal=img_modal, 
                scheme=scheme)
            dictpoint_to_world(tmp_kpt, tif, add=True)
            # Bug fix: this merge used to sit outside the channel loop, so
            # only the LAST channel's keypoints reached the NMS step. Merge
            # every channel's keypoints before the unified NMS below.
            keypoints.update(tmp_kpt)

        # Apply NMS jointly over all channels' keypoints.
        radius = 30
        keypoints = keypoints_nms(size, keypoints, r=radius)

        fni = FillNameID([file_name], type_=img_modal)
        img_pt_dict[f"{fni.info[0]}"] = keypoints
        del fni

    cpd = ChangePointDict('img', img_pt_dict)
    cpd.to_json(json_dir=save_dir, json_='src')

    return img_pt_dict


def drawMain():
    """Debug/visualization entry point: run HARRIS corner detection on one
    hard-coded SpaceNet tile (per channel plus the channel mean) and write
    corner-overlay PNGs to .\\results\\.

    NOTE(review): paths are machine-specific (E:\\...) — adjust before use.
    """
    # RGBN_dir = "E:\\datasets\\spacenet\\train\\MS\\" + \
    #             "SN6_Train_AOI_11_Rotterdam_MS_" + \
    #             "20190804111224_20190804111453_tile_8679.tif"
    RGBN_dir = "E:\\datasets\\spacenet\\train\\SAR-Intensity\\" +  \
                "SN6_Train_AOI_11_Rotterdam_SAR-Intensity_" + \
                "20190804111224_20190804111453_tile_8679.tif"
                
    os.chdir("E:\\workspace\\SOMatch\\preprocess")


    RGBN_dset = rasterio.open(RGBN_dir)
    # Normalization divisor for this tile's pixel values — presumably the
    # dataset-specific max; verify against the source imagery.
    max_num = 93
    RGBN_img = RGBN_dset.read()
    RGBN_img = RGBN_img / max_num
    RGBN_img = RGBN_img.astype(np.float32)
    # Append the per-pixel mean of all bands as an extra channel.
    RGBN_img = np.concatenate((RGBN_img, RGBN_img.mean(axis=0, keepdims=True)), axis=0)
    # RGBN_img = RGBN_img.astype(np.float32)

    show_img = []
    thresh_ratio_harris = 0.00001 # Harris threshold as a fraction of the max response
    radius = 10 # minimum corner separation in pixels (skimage only)
    # Rescale to 8-bit for the detector.
    RGBN_img = (RGBN_img * 256).astype(np.uint8)
    for channel in range(RGBN_img.shape[0]):
        cp_img = RGBN_img[channel].copy()

        # Choose the algorithm.
        # tmp_img = cv.cornerHarris(cp_img, 2, 3, 0.05)
        # tmp_img = corner_harris(cp_img)
        keypts = find_keypoints(cp_img, scheme="HARRIS")

        # # Generate a mask image (comment out the 2nd/3rd lines from the end);
        # # for a response heatmap only the last three lines are needed.
        # corner_mask  = tmp_img >= (thresh_ratio_harris * tmp_img.max())
        # edge_mask  = tmp_img <= (thresh_ratio_harris * tmp_img.min())
        # tmp_img = np.zeros((tmp_img.shape[0], tmp_img.shape[1], 3), dtype=np.uint8)
        # tmp_img[corner_mask] = [0, 0, 255]
        # tmp_img[edge_mask] = [0, 255, 0]
        # # tmp_img = np.where(tmp_img>=0, tmp_img, 0)
        # # tmp_img = ((tmp_img - tmp_img.min()) / (tmp_img.max() - tmp_img.min()) * 255).astype(np.uint8)
        # show_img.append(tmp_img)

        # Build a corner image from selected points; this section and the
        # previous one are mutually exclusive.
        # kpts = corner_peaks(tmp_img, min_distance=radius, threshold_rel=thresh_ratio_harris) # returns (row, col)
        # kpts = corner_peaks(tmp_img)

        resp = []
        for pt in keypts:
            resp.append(pt.response)

        print(min(resp))
    
        # Mark detected corners in red on a 3-channel copy of the image.
        kpts = np.array(cvpoints_to_xy(keypts), np.int32)
        kpts = kpts.T
        tmp_img = cp_img
        tmp_img = np.stack((tmp_img, tmp_img, tmp_img), axis=2)
        tmp_img[kpts[1], kpts[0]] = [0, 0, 255]
        show_img.append(tmp_img)

        # kpts = np.argwhere(tmp_img>thresh_ratio_harris*tmp_img.max())
        # kpts = np.array([cv.KeyPoint(coord[1], coord[0], 1) for coord in kpts])
        # cp_img = (cp_img * 256).astype(np.uint8)
        # show_img.append(cv.drawKeypoints(cp_img, kpts, cp_img, flags=cv.DRAW_MATCHES_FLAGS_DEFAULT))

    for ids, fname in enumerate(show_img):
        cv.imwrite(f'.\\results\\ch{ids}_harris.png', fname)


def get_crn(
    file_list, save_dir, *, type_='RGB', modal='', scheme='HARRIS', 
    mean=True, border_radius=0, threshold_abs=0.1, pproc_func=None, 
    read_func=open_img_np
) -> tuple:
    '''
    Extract corner points for every file in the list and save them.

    Param:
    -----
    file_list: list of files to extract keypoints from
    save_dir: path to save the image-to-keypoints dict, e.g. "./a.json"
    type_: image type ('RGB'/'NIR'/'SAR')
    modal: modality info ('MS'/'SAR-Intensity'/'PS-RGB')
    scheme: keypoint algorithm, same options as find_keypoints
    mean: whether to also extract keypoints from the per-pixel channel mean
    border_radius: no keypoints within this many pixels of nodata/image border
    threshold_abs: absolute Harris response threshold
    pproc_func: optional preprocessing function applied to the ndarray
    read_func: function used to read a file into an ndarray

    Return:
    -----
    (img_pt_dict, save_dir): the image-to-keypoints dict and the path it
    was saved to

    Raises:
    -----
    ValueError: if `type_` is not one of the supported types
    '''

    if type_ == 'RGB':
        ch_info = ['B', 'G', 'R']
        img_modal = 'MS'
    elif type_ == 'NIR':
        ch_info = ['NIR']
        img_modal = 'MS'
    elif type_ == 'SAR':
        ch_info = ['SAR0', 'SAR1', 'SAR2', 'SAR3']
        img_modal = 'SAR-Intensity'
    else:
        raise ValueError(f"wrong type_: {type_}! stupid ass !")

    if modal != '':
        img_modal = modal

    # image -> keypoint-attributes dict
    img_pt_dict = {}
    for file_name in file_list:
        img_np = read_func(file_name)

        if pproc_func is not None:
            clac_img = pproc_func(img_np)
        else:
            clac_img = img_np

        # Promote grayscale to a single-channel 3-D array.
        if len(clac_img.shape) == 2:
            clac_img = np.expand_dims(clac_img, axis=2)
        if mean and clac_img.shape[2] != 1:
            clac_img = np.concatenate((clac_img, 
                clac_img.mean(axis=2, keepdims=True).astype(np.uint8))
                , axis=2)

        # keypoint-attributes dict
        keypoints = {}
        ch_num = clac_img.shape[2]
        size = clac_img.shape[1::-1]
        # PNG inputs have no nodata mask, so treat every pixel as valid.
        mask_img = np.full(clac_img.shape[:2], 255, dtype=np.uint8)
        # Compute keypoints channel by channel.
        for ch_ids, ch in zip(range(ch_num), ch_info):
            cp_img = clac_img[:, :, ch_ids].copy()
            keypts = find_keypoints(
                cp_img, mask_img, scheme=scheme, nodata_border=border_radius,
                exclude_border=border_radius,
                threshold_abs=threshold_abs
            )
            tmp_kpt = cvpoints_to_dict(keypts, channel=ch, modal=img_modal, 
                scheme=scheme)
            # Bug fix: this merge used to sit outside the channel loop, so
            # only the LAST channel's keypoints reached the NMS step. Merge
            # every channel's keypoints before the unified NMS below.
            keypoints.update(tmp_kpt)

        # Apply NMS jointly over all channels' keypoints.
        radius = 15
        keypoints = keypoints_nms(size, keypoints, r=radius)

        img_pt_dict[file_name] = keypoints

    jhelper = json_helper(dict_=img_pt_dict)
    jhelper.to_json(save_dir=save_dir)

    return img_pt_dict, save_dir


# get corner point from png datasets
def get_png_crn(
    *, workspace="", suffix_opt="", suffix_sar="", 
    dset_path="", save_dir="", list_path="", 
    scheme="HARRIS", threshold_abs=1, border_radius=32, 
    read_func=open_img_np
):
    """
    get corner point from png datasets

    Param:
    -----
    dset_path : folder of dataset, is "" if need relative path in json
    save_dir :  folder to save results json
    list_path : path of list file in which searched images are
    scheme :    default "HARRIS"
    threshold_abs : dict or num, harris threshold of all modals when num, 
                    modal name and corresponding threshold when dict
    border_radius : border to nms corners, it was 64 in source code
    suffix_opt :    suffix of OPT json, got by threshold and schema when it's ""
    suffix_sar :    suffix of SAR json, got by threshold and schema when it's ""
    workspace : path of workspace
    """
    workspace_path = "E:/workspace/SOMatch/" if workspace == "" else workspace
    sys.path.append(workspace_path)

    if type(threshold_abs) == int:
        tmp = threshold_abs
        threshold_abs = {"OPT" : tmp, "SAR" : tmp}

    suffix_opt = "{}{}".format(scheme, threshold_abs["OPT"]) if suffix_opt=="" else suffix_opt
    suffix_sar = "{}{}".format(scheme, threshold_abs["SAR"]) if suffix_sar=="" else suffix_sar

    with open(list_path) as fp:
        dset_dict = json.load(fp)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)

    img2pt_file_list = []

    for modal, crn_dict in dset_dict.items():
        # 给相对路径加数据集路径
        if dset_path != '':
            for ids in range(len(crn_dict)):
                crn_dict[ids] = os.path.join(dset_path, crn_dict[ids])

        if modal == 'OPT':
            img_pt_dict, fn = get_crn(crn_dict, 
                os.path.join(save_dir,
                f"{modal}_{suffix_opt}.json"),
                type_='RGB', scheme=scheme, modal='PS-RGB', 
                border_radius=border_radius, threshold_abs=threshold_abs[modal], 
                read_func=read_func)
        if modal == 'SAR':
            img_pt_dict, fn = get_crn(crn_dict, 
                os.path.join(save_dir,
                f"{modal}_{suffix_sar}.json"),
                type_='SAR', scheme=scheme,
                border_radius=border_radius, threshold_abs=threshold_abs[modal], 
                read_func=read_func, pproc_func=bilatera_blur)

        img2pt_file_list.append(fn)

    return img2pt_file_list


# if __name__ == "__main__":

    # drawMain()
    # pass
    # # 本段用于对spacenet6提取特征点
    # with open("E:/workspace/SOMatch/preprocess/results/subset.json") as fp:
    #     dset_dict = json.load(fp)

    # scheme = 'HARRIS'
    # for modal, crn_dict in dset_dict.items():
    #     if modal == 'PS-RGB':
    #         img_pt_dict = get_crn(crn_dict, 
    #             "E:/workspace/SOMatch/preprocess/results/" + \
    #                 f"subset_PS-RGB_{scheme}.json",
    #             type_='RGB', scheme=scheme, modal='PS-RGB', 
    #             border_radius=160, threshold_abs=0.16)
    #     elif modal == 'SAR-Intensity':
    #         img_pt_dict = get_crn(crn_dict, 
    #             "E:/workspace/SOMatch/preprocess/results/" + \
    #                 f"subset_SAR_{scheme}.json",
    #             type_='SAR', scheme=scheme,
    #             border_radius=160, threshold_abs=5e-3)



