import os
from pathlib import Path
import math
from posixpath import basename, dirname
import random
import json
from re import S
from sys import prefix
from tkinter import N
from cv2 import sort

import numpy as np
import rasterio
from rasterio.warp import calculate_default_transform
from rasterio.vrt import WarpedVRT
from rasterio.enums import Resampling
import matplotlib.pyplot as plt
import pandas as pd
import geopandas
from shapely.geometry import Point
from skimage.feature import corner_peaks

# 默认数据集文件符合如下情况:
# 数据集根目录/train or test /城市名 /模态名 /图像名


def change_filename(filename: list, src_fold: str, dst_fold: str) -> list:
    '''
    Replace the src_fold keyword with dst_fold in every path of the list.

    Both the file's base name and its parent folder are rewritten: the base
    name keeps everything before the first and after the last occurrence of
    src_fold, and the last directory component is replaced by dst_fold.
    '''
    renamed = []
    for path in filename:
        parent = os.path.dirname(path)
        leaf = os.path.basename(path)
        pieces = leaf.split(src_fold)
        new_leaf = pieces[0] + dst_fold + pieces[-1]
        if parent:
            # swap the last directory component for the destination folder
            parent = os.path.join(os.path.dirname(parent), dst_fold)
        renamed.append(os.path.join(parent, new_leaf))
    return renamed

def filelist_exist(filename):
    '''
    Check that every path in the list exists on disk.

    Param:
    -----
    filename: iterable of file paths

    Return:
    -----
    True when all paths exist, False otherwise; every missing path is
    printed (the loop does not stop at the first miss).
    '''
    flag = True
    for path in filename:
        if not os.path.exists(path):
            flag = False
            # fixed typo in the original message ("dosen't")
            print(f"{path} doesn't exist")

    return flag

class SpnDsetPath:
    '''
    Collects the file paths under each sub-folder of a dataset directory
    into a dict keyed by sub-folder (modality) name, with the list of file
    paths as the value.

    Attribute:
    -----
    dset_dict: dict of folder-name -> file-list pairs
    subset: dict of folder-name -> file-list pairs describing a
        sub-dataset; only valid after self.get_subset has been called
    length: number of files found in the first sub-folder

    Note:
    -----
    Instances can be indexed by integer position or by folder-name key.
    '''
    def __init__(self, dset_dir=None, fold_list=None) -> None:
        '''
        Enumerate the files under the dataset's sub-folders.

        Param:
        -----
        dset_dir: dataset root directory
        fold_list: names of the sub-folders (modalities) under dset_dir
        '''
        if dset_dir == None:
            dset_dir = "E:\\datasets\\spacenet\\train"
        if fold_list == None:
            fold_list = ["PAN", "MS", "SAR-Intensity"]
        
        # only files with these extensions are collected
        self.valid_suffix = ['.tiff', '.tif', '.png', '.jpg']
        self.dset_dir = dset_dir
        self.fold_list = fold_list

        self.dset_dict = self.__traverse_file()
        # the dataset length is taken from the first folder only; the other
        # folders are assumed to hold a matching number of files
        self.length = len(self.dset_dict[self.fold_list[0]])
        
        for fold_name in self.fold_list:
            print(f"{fold_name} has {len(self.dset_dict[fold_name])} elems")

    def __traverse_file(self) -> dict:
        # Walk each modality folder (non-recursively) and collect the paths
        # of files whose extension is in self.valid_suffix.
        dset_dict = {}
        print("traverse start")
        for data_type in self.fold_list:
            dset_path = Path(os.path.join(self.dset_dir, data_type))
            tmp_list = []
            for path in dset_path.iterdir():
                if path.is_file():
                    if os.path.splitext(path)[1] in self.valid_suffix:
                        tmp_list.append(str(path))
            dset_dict[data_type] = tmp_list
        print("traverse finished")

        return dset_dict

    def __getitem__(self, ids):
        # Accept either an integer position into fold_list or a folder name.
        if isinstance(ids, int) and ids < len(self.fold_list) \
            and ids >= 0:
            ids = self.fold_list[ids]
        if ids not in self.fold_list:
            # NOTE(review): the second literal is not an f-string, so
            # "{self.fold_list}" is emitted verbatim — probably meant f"...".
            raise ValueError(f"ids is {ids}, but it should be" \
            "in {self.fold_list}")

        return self.dset_dict[ids]

    def get_subset(
        self, fold_list=None, rate: float=None, num: int=None
    ) -> dict:
        '''
        Draw a random sub-dataset of a given fraction or size.

        Param:
        -----
        fold_list: modality folder names; defaults to self.fold_list
        rate: fraction of the whole dataset to keep (clamped to [0, 1])
        num: number of samples to keep; only one of rate/num is used and
            num takes precedence

        Return:
        -----
        dict of folder-name -> file-list pairs (also stored in self.subset)
        '''
        if fold_list == None:
            fold_list = self.fold_list
        else:
            if fold_list in self.fold_list:
                pass
            elif isinstance(fold_list, list):
                for i in fold_list:
                    if i not in self.fold_list:
                        raise ValueError(f"{i} not in self.fold_list")
            else:
                raise TypeError(f"fold_list should be a str or list")

        if rate == None and num == None:
            raise ValueError("both rate and num are None !")
        
        if num != None:
            # clamp num into [0, length]
            num = num if num >= 0 else 0
            num = num if num <= self.length else self.length
            self.subset_size = num
        else:
            # clamp rate into [0, 1] and round the sample count up
            rate = rate if rate >= 0. else 0.
            rate = rate if rate <= 1. else 1.
            self.subset_size = math.ceil(rate * self.length)

        # random sample without replacement via a shuffled index list
        shuffle_ids = list(range(self.length))
        random.shuffle(shuffle_ids)
        subset_ids = shuffle_ids[:self.subset_size]

        # sample paths from the first folder, then derive the other
        # folders' paths by swapping the folder keyword in each name
        fold_name = fold_list[0]
        self.subset = {fold_name: []}
        for ids in subset_ids:
            self.subset[fold_name].append(self.dset_dict[fold_name][ids])
        
        for i in range(1, len(fold_list)):
            dst_name = fold_list[i]
            self.subset[dst_name] = change_filename(self.subset[fold_name], 
                fold_name, dst_name)

        return self.subset

    def subset_exist(self):
        '''
        Check whether every path of the generated subset exists on disk.
        '''
        flag = True
        for fold in self.subset:
            if not filelist_exist(self.subset[fold]):
                flag = False

        return flag

    def split_train_valid_test(
        self, *, rate: list=None, num: list=None, save_path=''
    ) -> None:
        '''
        Randomly split the dataset into valid/test/train parts and
        optionally dump each part (plus an OPT/SAR-renamed copy) to json.

        Param:
        -----
        rate: list of split fractions; converted to counts via rounding
        num: list of split counts (at most 3); only one of rate/num is
            used.  After normalisation the counts are ordered as
            [valid, test, train], where a None train count means
            "take everything that is left"
        save_path: when non-empty, one json per part is written next to it
        '''
        if rate == None and num == None:
            raise ValueError("both rate and num are None !")

        if rate != None:
            if len(rate) == 1:
                rate.append(0)
            num = []
            for r in rate:
                num.append(round(r * self.length))
            
        if num != None:
            if len(num) > 3:
                raise ValueError("len > 3!")
            elif len(num) == 1:
                num += [0, None]
            elif len(num) == 2:
                num.append(None)
            else:
                # rotate so the train count comes last: the caller passes
                # [train, valid, test], which becomes [valid, test, train]
                train_num = num.pop(0)
                num.append(train_num)
            
            # NOTE(review): `sum` shadows the builtin of the same name.
            sum = 0
            for n in num:
                if n == None:
                    break
                sum += n
            if sum > self.length:
                raise ValueError("sum of rate > 1 or sum of num > length !")

            # turn the per-part counts into cumulative slice stops
            last = 0
            for i in range(len(num)):
                if num[i] == None:
                    break
                num[i] = last + num[i]
                last = num[i]
            
        shuffle_ids = list(range(self.length))
        random.shuffle(shuffle_ids)
        last = 0
        self.split_set = {}
        for n, m in zip(iter(num), iter(["valid", "test", "train"])):
            # n == None slices to the end (the "rest" part)
            subset_ids = shuffle_ids[last:n]
            if n != None:
                # NOTE(review): slice stops are exclusive, so `last = n`
                # looks intended; `n + 1` skips shuffle_ids[n] — confirm.
                last = n + 1
            fold_name = self.fold_list[0]
            self.split_set[m] = {fold_name: []}
            for ids in subset_ids:
                self.split_set[m][fold_name].append(
                    self.dset_dict[fold_name][ids])
                    
            # derive the other modalities' paths from the first folder's
            for i in range(1, len(self.fold_list)):
                dst_name = self.fold_list[i]
                self.split_set[m][dst_name] = change_filename(
                    self.split_set[m][fold_name], fold_name, dst_name)

        if save_path != '':
            # <stem>_<part><suffix> holds the full split;
            # <stem>_dset_<part><suffix> keeps only OPT/SAR renamed keys
            basename = os.path.basename(save_path)
            dirname = os.path.dirname(save_path)
            basename = os.path.splitext(basename)
            suffix = basename[1]
            basename = basename[0]

            for m in ["valid", "test", "train"]:
                file_path = os.path.join(
                    dirname, basename+f'_{m}'+suffix
                )
                with open(file_path, 'w') as fp:
                    json.dump(self.split_set[m], fp, indent='\t', 
                    sort_keys=True)
                
                # keep only PS-RGB/SAR-Intensity, renamed to OPT/SAR
                tmp_dict = {}
                for modal, img in self.split_set[m].items():
                    name = None
                    if modal == "PS-RGB":
                        name = "OPT"
                    elif modal == "SAR-Intensity":
                        name = "SAR"
                    if name != None:
                        tmp_dict[name] = img
                file_path = os.path.join(
                    dirname, basename+f'_dset_{m}'+suffix
                )
                with open(file_path, 'w') as fp:
                    json.dump(tmp_dict, fp, indent='\t', sort_keys=True)

class FillNameID:
    '''
    Converts between SpaceNet file names and their id / info tuples.

    Example:
    -----
    >>> name_list = FillNameID().info2name([name_list], modal, dset_dir)[0]
    >>> file_id = FillNameID().name2id([name_list])[0]
    '''

    # folder / modality keywords that may appear inside a file name
    name_list = ["Buildings", "MS", "PAN", "PS-MS", "PS-RGB", "SAR-Intensity"]

    def __init__(
        self, filename: list =None, fileid: list =None, \
        type_: str =None, dir_name: str =''
    ) -> None:
        '''
        Converts between SpaceNet file names and their ids.

        Param:
        -----
        filename: initial list of file paths
        fileid: initial list of file infos; at most one of
            filename/fileid may be given
        type_: folder (modality) the ids/files belong to
        dir_name: dataset directory used when turning info into a name,
            e.g. "./spacenet/train"

        Note:
        -----
        If one of filename/fileid is given, the matching conversion is run
        automatically.
        '''
        self.name = []
        self.id = []
        self.info = []
        self.type = type_
        if filename != None:
            self.name = filename
            # dataset dir = two levels above the file (strip file + modality)
            self.dirname = os.path.dirname(self.name[0])
            self.dirname = os.path.dirname(self.dirname)
            self.name2id()
        elif fileid != None:
            self.info = fileid
            self.dirname = dir_name
            self.info2name()

    def info2name(self, info: list =None, type_=None, dirname='') -> list:
        '''
        Convert file infos to file names.

        Param:
        -----
        info: list of (date1, date2, tile id) tuples; when given it is
            assigned to self.info, otherwise self.info is used
        type_: target folder; defaults to self.type
        dirname: directory of the resulting names; assigned to self.dirname

        Return:
        -----
        self.name: conversion results are appended after any existing
            elements

        Note:
        -----
        Results are appended to self.name; call clear() first if only this
        conversion's results are wanted.
        '''
        if type_ == None:
            type_ = self.type
        if type_ == None:
            raise ValueError(f"both self.type and type_ are None")

        if dirname != '':
            self.dirname = dirname
        else:
            dirname = self.dirname


        self.info = info if info != None else self.info
        for i in self.info:
            if isinstance(i, str):
                # NOTE(review): eval on arbitrary strings is unsafe; the
                # inputs here are expected to be "(d1, d2, id)" tuples.
                i = eval(i)
            basename = i
            # name layout: SN6_Train_AOI_11_Rotterdam_<type>_<d1>_<d2>_tile_<id>.tif
            basename = str(basename[0]) + '_' + str(basename[1]) + \
                '_tile_' + str(basename[2]) + '.tif'
            basename = "SN6_Train_AOI_11_Rotterdam_" + \
                type_ + '_' + basename
            name = os.path.join(dirname, type_, basename)
            self.name.append(name)
        
        return self.name

    def name2id(self, name: list =None) -> list:
        '''
        Convert file names to file ids.

        Param:
        -----
        name: optional list of file names; assigned to self.name when given

        Return:
        -----
        self.id: tile ids are appended after any existing elements
        self.info: (date1, date2, id) tuples are appended likewise; the
            info list is what gets returned

        Note:
        -----
        Results are appended to self.id/self.info; call clear() first if
        only this conversion's results are wanted.
        '''
        self.name = name if name != None else self.name

        for i in self.name:
            basename = os.path.basename(i)
            basename = os.path.splitext(basename)[0]
            if basename[0] == '.':
                raise ValueError(f"{i} has an unexpected .")
            if self.type != None:
                if self.type not in basename:
                    raise ValueError(f"{i} has no key word {self.type}")
                type = self.type
            else:
                # fall back to the first keyword found in the name.
                # NOTE(review): substring match — list order matters
                # (e.g. "MS" also matches "PS-MS" names); confirm intended.
                for type in FillNameID.name_list:
                    if type in basename:
                        break
            # split off everything after "<type>_": "<d1>_<d2>_tile_<id>"
            id = basename.split(type+'_')[1]
            id = id.split('_tile_')
            id[0] = id[0].split('_')
            self.id.append(int(id[1]))
            self.info.append((int(id[0][0]), int(id[0][1]), int(id[1])))

        return self.info

    def clear(self, tgt='id'):
        '''
        Clear the name or id lists.

        Param:
        -----
        tgt: 'id'/'info' clears self.id and self.info; 'name' clears
            self.name
        '''
        if tgt == 'id' or tgt == 'info':
            self.id.clear()
            self.info.clear()
        elif tgt == 'name':
            self.name.clear()

def str_to_tuple(tuple_str, float_=False):
    '''
    Parse a string such as "(1, 2)" into a tuple of ints, or of floats
    when float_ is True.
    '''
    cast = float if float_ else int
    body = tuple_str.strip('()')
    return tuple(cast(part) for part in body.split(','))

class ChangePointDict:
    '''
    Converts between an image-to-points dict and a numbered points dict.

    Attribute:
    -----
    types: 'img'/'pt', format of the source dict (image-keyed / id-keyed)
    src_dict: the dict being converted
    tgt_dict: the converted dict

    Note:
    -----
    Instances support index access to the two dicts (0 = src, 1 = tgt).
    image-to-points dict: {
        "(date1, date2, file id)": {
            "(image x, image y)": {
                point attributes
            }}}
    numbered points dict: {sequence number: {
        "img_info": file the point belongs to, (date1, date2, file id),
        "img_id": id of the file the point belongs to,
        point attributes,
    }}

    Example:
    -----
    As a class:

    >>> # the conversion result is available right after construction
    >>> cpd = ChangePointDict('img', img_dict)
    >>> pt_dict = cpd[1]

    >>> # or call transform() explicitly after construction
    >>> cpd = ChangePointDict()
    >>> cpd.from_json(json_dir) # assign via json
    >>> cpd[0] = img_dict # or assign directly
    >>> cpd.transform(types='img')

    Using the methods only:
    >>> pt_dict = ChangePointDict().from_json(json_dir)
    >>> img_dict = ChangePointDict().transform('pt', pt_dict)
    '''
    def __init__(self, types='pt', dict: dict=None, json_dir=None, 
        id_key='') -> None:
        '''
        Set the dict (or load it from json) and convert it automatically.

        Param:
        -----
        types: 'img'/'pt', format of the given dict; transform() converts
            it to the other format
        dict: dict used to initialise src_dict
            (NOTE(review): the parameter shadows the builtin `dict`; kept
            as-is for caller compatibility)
        json_dir: json path loaded into src_dict; at most one of
            dict/json_dir may be given
        id_key: prefix prepended to the numeric keys built by img2point

        Note:
        -----
        When initialised through dict or json_dir, the other format is
        produced automatically and stored in tgt_dict.
        '''
        if types not in ['img', 'pt']:
            raise ValueError(f"types must in ['img', 'pt'], but it's {types}")

        self.types = types
        self.tgt_dict = None
        self.id_key = id_key

        if dict != None:
            self.src_dict = dict
            self.transform()
        elif json_dir != None:
            self.src_dict = self.from_json(json_dir)
            self.transform()

    def from_json(self, json_dir):
        '''
        Load a dict from json.

        Param:
        -----
        json_dir: path of the json file

        Return:
        ----
        the loaded dict; src_dict is updated as well
        '''
        with open(json_dir, 'r') as fp:
            self.src_dict = json.load(fp)
        
        return self.src_dict

    def to_json(self, json_dir, json_='tgt'):
        '''
        Dump a dict to json.

        Param:
        -----
        json_dir: output path
        json_: 'tgt'/'src', dump tgt_dict or src_dict
        '''
        if json_ == 'tgt':
            if self.tgt_dict == None:
                raise ValueError("self.tgt_dict is None!")
            with open(json_dir, 'w') as fp:
                json.dump(self.tgt_dict, fp, indent='\t', sort_keys=True)
        elif json_ == 'src':
            with open(json_dir, 'w') as fp:
                json.dump(self.src_dict, fp, indent='\t', sort_keys=True)
                
    def transform(self, types=None, src_dict=None, id_key=''):
        '''
        Convert between the two dict formats.

        Param:
        ----
        types: 'img'/'pt', format of the source dict; defaults to the
            types attribute
        src_dict: dict to convert; defaults to the src_dict attribute
        id_key: key prefix forwarded to img2point; falls back to
            self.id_key when empty

        Return:
        -----
        the converted dict; it is also stored in tgt_dict
        '''
        if types != None:
            self.types = types
        if self.types == 'img':
            if id_key == '' and self.id_key != '':
                id_key = self.id_key
            return self.img2point(src_dict, id_key)
        else:
            return self.point2img(src_dict)

    def img2point(self, src_dict=None, id_key='') -> dict:
        # Flatten an image-keyed dict into a sequentially numbered points
        # dict; every point gains "img_info" and "img_id" entries.
        if src_dict == None:
            src_dict = self.src_dict

        cnt = 0
        points_dict = {}
        for img, pts_dict in src_dict.items():
            if isinstance(img, tuple):
                img_info = img
            else:
                # json keys arrive as strings like "(d1, d2, id)"
                img_info = str_to_tuple(img)
            for pt_attr in pts_dict.values():
                tmp_dict = {
                    "img_info" : img_info,
                    "img_id" : img_info[-1],
                }
                tmp_dict.update(pt_attr)
                points_dict[id_key+str(cnt)] = tmp_dict
                cnt += 1

        self.tgt_dict = points_dict

        return points_dict

    def point2img(self, src_dict=None) -> dict:
        # Group a numbered points dict back into an image-keyed dict.
        # NOTE(review): the point dicts are mutated in place ("img_info"
        # and "img_id" are popped), so the source dict loses those keys.
        if src_dict == None:
            src_dict = self.src_dict

        img_dict = {}
        for pts in src_dict.values():
            img_info = tuple(pts["img_info"])
            if str(img_info) not in img_dict:
                img_dict[str(img_info)] = {}
            point_key = str(tuple(pts["xy"]))
            pts.pop("img_info")
            pts.pop("img_id")
            img_dict[str(img_info)][point_key] = pts

        self.tgt_dict = img_dict
    
        return img_dict

    def __getitem__(self, ids):
        # 0 -> src_dict, 1 -> tgt_dict
        if ids == 0:
            return self.src_dict
        elif ids == 1:
            return self.tgt_dict

    def __setitem__(self, ids, value):
        # 0 -> src_dict, 1 -> tgt_dict
        if ids == 0:
            self.src_dict = value
        elif ids == 1:
            self.tgt_dict = value

def get_ids2id_list(src_json: list, save_path: str, dset_dir: str=None) -> dict:
    '''
    Flatten several point-dict json files into one numbered point list.

    Param:
    -----
    src_json: json paths holding numbered point dicts
    save_path: where the merged result is written as json
    dset_dir: when given, an 'img_path' entry is added to every point

    Return:
    -----
    dict with keys 'from_files', 'points' and 'len'
    '''
    merged = {'from_files': [], 'points': []}
    count = 0
    for json_file in src_json:
        print(json_file)
        point_dict = ChangePointDict().from_json(json_file)
        merged['from_files'].append(json_file)
        for point in point_dict.values():
            if dset_dir != None:
                # resolve the image path of the point from its file info
                point['img_path'] = FillNameID(
                    fileid=[point['img_info']], type_=point['modal'],
                    dir_name=dset_dir).name[0]
            merged['points'].append(point)
            count += 1
    merged['len'] = count

    with open(save_path, 'w') as fp:
        json.dump(merged, fp, indent='\t')
    return merged

def read_json(json_dir):
    '''
    Load a json file into a dict.

    Param:
    -----
    json_dir: path of the json file

    Return:
    ----
    the parsed object (normally a dict)
    '''
    # docstring fixed: this free function keeps no state (the original
    # text was copy-pasted from ChangePointDict.from_json)
    with open(json_dir, 'r') as fp:
        return json.load(fp)

def img2pt_to_geojson(
    img2pt: list=None, img2pt_json: list=None, *, 
    save_dir: str='', modal_dset_dir: dict={}, func=None, 
):
    '''
    Write image-to-points dicts as a geojson of points with attributes.

    Param:
    -----
    img2pt/img2pt_json: image-to-points dict(s) or their json path(s);
        img2pt takes precedence when both are given
    save_dir: geojson output path ('' skips writing)
    modal_dset_dir: maps a modality name to its dataset directory, used to
        rebuild the image path of every point
    func: unused here; kept for interface compatibility

    Return:
    -----
    the GeoDataFrame of all points

    Note:
    -----
    Only images present in every input dict are exported.
    '''
    if img2pt:
        if not isinstance(img2pt, list):
            img2pt = [img2pt]
    else:
        if not isinstance(img2pt_json, list):
            img2pt_json = [img2pt_json]
        img2pt = []
        for json_ in img2pt_json:
            img2pt.append(read_json(json_))

    # keep only the images shared by every input dict
    length = len(img2pt)
    photo_set = set(img2pt[0].keys())
    for i in range(1, length):
        tmp_set = set(img2pt[i].keys())
        photo_set = photo_set.intersection(tmp_set)

    pt_list = []
    # every shared image
    for img in photo_set:
        # every keypoint dict file
        for dict_ in img2pt:
            # every keypoint of that file belonging to this image
            for pt in dict_[img].values():
                tmp_dict = {
                    "path": FillNameID().info2name(
                        [img], pt["modal"], modal_dset_dir[pt["modal"]]
                    )[0],
                }
                tmp_dict.update(pt)
                # 'word_xy' is presumably the world coordinate — confirm
                tmp_dict['x'] = tmp_dict['word_xy'][0]
                tmp_dict['y'] = tmp_dict['word_xy'][1]
                tmp_dict.pop('word_xy')
                pt_list.append(tmp_dict)

    df = pd.DataFrame(pt_list)
    df['geometry'] = geopandas.points_from_xy(df['x'], df['y'])
    # keep the pixel coordinate as a WKT-style string column
    df['xy'] = df['xy'].apply(lambda x: f"POINT ({x[0]} {x[1]})")
    del df['x'], df['y']
    df['wkt'] = df['geometry'].apply(lambda x: str(x))
    gdf = geopandas.GeoDataFrame(df)

    if save_dir != '':
        gdf.to_file(save_dir, driver="GeoJSON")

    return gdf

def img2pt_to_win_gjson(
    cities: list, img2pt: list=None, img2pt_json: list=None, *, 
    save_dir: str='', win_size: tuple=(320, 320), 
    modals: list=['SAR-Intensity', 'PS-RGB'], 
):
    '''
    Extract fixed-size windows centred on every keypoint and write them as
    a geojson.

    Param:
    -----
    cities: dataset directory per img2pt entry (duplicates allowed); the
        order must match img2pt
    img2pt/img2pt_json: image-to-points dict(s) or their json path(s)
    save_dir: geojson output path ('' skips writing)
    win_size: window size in pixels (width, height)
    modals: modalities for which a window row is emitted per point

    Return:
    -----
    the GeoDataFrame of windows
    '''
    if img2pt:
        if not isinstance(img2pt, list):
            img2pt = [img2pt]
    else:
        if not isinstance(img2pt_json, list):
            img2pt_json = [img2pt_json]
        img2pt = []
        for json_ in img2pt_json:
            img2pt.append(read_json(json_))

    if len(cities) != len(img2pt):
        raise ValueError("city should have the same len with img2pt")

    # keep only the images shared by every input dict
    photo_set = set(img2pt[0].keys())
    for i in range(1, len(img2pt)):
        tmp_set = set(img2pt[i].keys())
        photo_set = photo_set.intersection(tmp_set)

    win_list = []
    for img in photo_set:
        # every keypoint dict file
        for dict_, city in zip(img2pt, cities):
            # every keypoint of that file belonging to this image
            for pt in dict_[img].values():
                for m in modals:

                    # pixel window centred on the keypoint (rows = y)
                    row_start = pt['xy'][1] - win_size[1] // 2
                    row_stop = pt['xy'][1] + win_size[1] // 2
                    col_start = pt['xy'][0] - win_size[0] // 2
                    col_stop = pt['xy'][0] + win_size[0] // 2

                    tmp_dict = {
                        "wkt": str(Point(pt['word_xy'][0], pt['word_xy'][1])),
                        "window": f"(({row_start}, {row_stop}), ({col_start}, {col_stop}))",
                        "x": pt['xy'][0], 
                        "y": pt['xy'][1], 
                    }

                    # rebuild the image path of this modality under the city dir
                    tif_path = FillNameID(
                        fileid=[img], type_=m, 
                        dir_name=city
                    ).name[0]
                    tmp_dict['path'] = tif_path
                    win_list.append(tmp_dict)

    df = pd.DataFrame(win_list)
    df["geometry"] = geopandas.points_from_xy(df['x'], df['y'])
    del df['x'], df['y']
    gdf = geopandas.GeoDataFrame(df, geometry='geometry')

    if save_dir != '':
        gdf.to_file(save_dir, driver='GeoJSON')

    return gdf

def geojson_to_img2pt(
    geo_gjson: str, *, img2pt_json: list=None, modals: list=None, 
    save_path: list=None
) -> None:
    '''
    Split a keypoint geojson back into per-modality img2pt json files.

    Param:
    ----
    geo_gjson: keypoint geojson path
    img2pt_json: json files holding the points referenced by geo_gjson,
        ordered to match the next parameter
    modals: modality represented by each json file
    save_path: output path per modality, in the same order

    Note:
    -----
    NOTE(review): the output list is hard-coded to two dicts ([{}, {}]),
    so at most two modalities are written even if more are given — confirm.
    '''

    gjson = geopandas.read_file(geo_gjson)
    dict_list = []
    for json_dir in img2pt_json:
        cpd = ChangePointDict()
        json_ = cpd.from_json(json_dir)
        dict_list.append(json_)

    if len(dict_list) != len(modals):
        raise ValueError("different length between dict and geo_gjson")

    img2pt = [{}, {}]
    for pts_info in gjson.iterrows():
        pts = pts_info[1]
        # recover the source image info from the point's file path
        file_info = FillNameID().name2id([pts['path']])[0]
        file_wkt = str(file_info)
        for m, dict_, dst_dict in zip(modals, dict_list, img2pt):
            # route the point to the dict of its modality (matched by the
            # modality keyword inside the file name)
            if m in os.path.basename(pts['path']):
                wkt = pts["xy"]
                if not file_wkt in dst_dict:
                    dst_dict[file_wkt] = {}
                dst_dict[file_wkt][wkt] = dict_[file_wkt][wkt]
                break

    for path, dst_dict in zip(save_path, img2pt):
        with open(path, 'w') as fp:
            json.dump(dst_dict, fp, indent='\t', sort_keys=True)

def merge_subset(subsets: list, save_path: str) -> None:
    '''
    Merge several image-list json files into one.

    Param:
    -----
    subsets: paths of the json files to merge; each maps a modality name
        to a list of image paths
    save_path: path of the merged json file

    Return:
    -----
    None; the merged dict is written to save_path
    '''
    merged = {}
    for subset in subsets:
        with open(subset, 'r') as fp:
            dict_ = json.load(fp)
        for modal, m_dict in dict_.items():
            # concatenate the path lists of the same modality
            merged.setdefault(modal, []).extend(m_dict)

    with open(save_path, 'w') as fp:
        json.dump(merged, fp, indent='\t', sort_keys=True)

def merge_img2pt(img2pt_path: list, save_path: str) -> None:
    '''
    Merge several img2pt json files into one; points of the same image
    coming from different files are combined into a single point dict.
    '''
    loaded = []
    for path in img2pt_path:
        with open(path) as fp:
            loaded.append(json.load(fp))

    # union of all image keys across the input files
    all_imgs = set()
    for d in loaded:
        all_imgs.update(d.keys())

    merged = {}
    for img in all_imgs:
        combined = {}
        for d in loaded:
            if img in d:
                combined.update(d[img])
        merged[img] = combined

    with open(save_path, 'w') as fp:
        json.dump(merged, fp, indent='\t', sort_keys=True)

# Merge multi-modal img2pt files with optional keypoint filtering
def merge_img2pt_chosen_pt(
    img2pt_json: list, *, save_path: str='', modal_dset_dir: dict={}, 
    chosen_func=None, exclude_empty_file: bool=True
) -> dict:
    """
    Merge multi-modal img2pt files, optionally filtering the merged points.

    Param:
    -----
    img2pt_json: paths of the img2pt json files to merge
    save_path: when non-empty, the merged dict is also written there
    modal_dset_dir: unused here; kept for interface compatibility
    chosen_func: filter applied to the merged dict; it must return, per
        image, a flag sequence aligned with the image's point order, and
        points whose flag is falsy are dropped
    exclude_empty_file: when True an image is kept only if it has at least
        one keypoint in every input file

    Return:
    -----
    the merged (and possibly filtered) img -> point dict
    (the original built this dict but never returned it)
    """
    img2pt = []
    for json_ in img2pt_json:
        with open(json_, 'r') as fp:
            img2pt.append(json.load(fp))

    # images present in every file (optionally only those with points)
    if exclude_empty_file:
        photo_set = set(filter(lambda a: len(img2pt[0][a])>0, img2pt[0].keys()))
    else:
        photo_set = set(img2pt[0].keys())
    for i in range(1, len(img2pt)):
        if exclude_empty_file:
            tmp_set = set(filter(
                lambda a: len(img2pt[i][a])>0, img2pt[i].keys()
            ))
        else:
            tmp_set = set(img2pt[i].keys())
        photo_set = photo_set.intersection(tmp_set)

    pt_dict = {}
    # every shared image: merge the points of all files
    for img in photo_set:
        pt_dict[img] = {}
        for dict_ in img2pt:
            pt_dict[img].update(dict_[img])

    if chosen_func != None:
        ids = chosen_func(pt_dict)
        for info in pt_dict.keys():
            # collect first, then pop: dict size must not change while
            # iterating it
            pop_keys = []
            for flag, pt_key in zip(ids[info], pt_dict[info]):
                if not flag:
                    pop_keys.append(pt_key)
            for key_ in pop_keys:
                pt_dict[info].pop(key_)

    if save_path != '':
        with open(save_path, 'w') as fp:
            json.dump(pt_dict, fp, indent='\t', sort_keys=True)

    return pt_dict

def wkt_to_kpt(wkts: list) -> dict:
    '''
    Turn WKT point strings ("POINT (x y)") into a keypoint dict keyed by
    the original WKT string, with integer 'xy' coordinates and a constant
    response of 1.
    '''
    keypoints = {}
    for wkt in wkts:
        parts = wkt.strip('POINT() ').split(' ')
        keypoints[wkt] = {
            'xy': [int(parts[0]), int(parts[1])],
            'response': 1
        }
    return keypoints

def chosen_func_nms(pt_dict, radius=63):
    '''
    Non-maximum suppression over the keypoints of every image.

    Param:
    -----
    pt_dict: img2pt dict whose point keys are WKT strings ("POINT (x y)")
    radius: minimum distance passed to corner_peaks

    Return:
    -----
    dict mapping each image info to a 0/1 flag list aligned with the
    image's point order (1 = keep the point)
    '''
    chosen_dict = {}
    for img_info, pts in pt_dict.items():
        chosen_dict[img_info] = []
        kpts = wkt_to_kpt(list(pts.keys()))

        # response map sized to the largest coordinate seen
        max_x, max_y = 0, 0
        for kpt in kpts.values():
            coord = kpt["xy"]
            max_x = coord[0] if coord[0] > max_x else max_x
            max_y = coord[1] if coord[1] > max_y else max_y

        feature_map = np.zeros((max_y+1, max_x+1)).astype(np.float64)

        for kpt in kpts.values():
            coord = kpt["xy"]
            feature_map[coord[1], coord[0]] = kpt["response"]
            
        crn = corner_peaks(feature_map, min_distance=radius, exclude_border=0)

        # append, per original point, a 0/1 flag telling whether it
        # survived the nms
        def wkt_to_pt(wkt):
            wkt = wkt.strip('POINT() ')
            wkt = wkt.split(' ')
            return(int(wkt[0]), int(wkt[1]))
        for pt_ in pts.keys():
            pt_ = wkt_to_pt(pt_)
            # NOTE(review): `list in ndarray` is numpy's element-wise
            # any-match, not a row match; a point may be flagged 1 when
            # only one coordinate matches some peak —
            # (crn == [y, x]).all(axis=1).any() is probably what is meant.
            if [pt_[1], pt_[0]] in crn:
                chosen_dict[img_info].append(1)
            else:
                chosen_dict[img_info].append(0)

    return chosen_dict

def merged_img2pt_to_geojson(
    img2pt: list=None, img2pt_json: list=None, *, 
    save_dir: str='', dset_dir: list=[]
):
    '''
    Write merged image-to-points dicts as a geojson of points with
    attributes.

    Param:
    -----
    img2pt/img2pt_json: image-to-points dict(s) or their json path(s)
    save_dir: geojson output path ('' skips writing)
    dset_dir: dataset directory per img2pt entry (the level above the
        modality folder in the standard layout)
    '''

    if img2pt:
        if not isinstance(img2pt, list):
            img2pt = [img2pt]
    else:
        if not isinstance(img2pt_json, list):
            img2pt_json = [img2pt_json]
        img2pt = []
        for json_ in img2pt_json:
            img2pt.append(read_json(json_))

    if len(img2pt) != len(dset_dir):
        raise ValueError("img2pt should have the same with dset_dir")
    
    pt_list = []
    for dict_, dir_ in zip(img2pt, dset_dir):

        for info, pt_dict_ in dict_.items():
            for pt in pt_dict_.values():

                # rebuild the image path from the file info and modality
                tmp_dict = {
                    "path": FillNameID().info2name(
                        [info], pt["modal"], dir_
                    )[0],
                }
                tmp_dict.update(pt)
                # 'word_xy' is presumably the world coordinate — confirm
                tmp_dict['x'] = tmp_dict['word_xy'][0]
                tmp_dict['y'] = tmp_dict['word_xy'][1]
                tmp_dict.pop('word_xy')
                pt_list.append(tmp_dict)

    df = pd.DataFrame(pt_list)
    df['geometry'] = geopandas.points_from_xy(df['x'], df['y'])
    # keep the pixel coordinate as a WKT-style string column
    df['xy'] = df['xy'].apply(lambda x: f"POINT ({x[0]} {x[1]})")
    del df['x'], df['y']
    df['wkt'] = df['geometry'].apply(lambda x: str(x))
    gdf = geopandas.GeoDataFrame(df)

    if save_dir != '':
        gdf.to_file(save_dir, driver="GeoJSON")

def merged_img2pt_to_win_gjson(
    cities: list, img2pt: list=None, img2pt_json: list=None, *,
    save_dir: str='', win_size: tuple=(320, 320), 
    modals: list=['SAR-Intensity', 'PS-RGB'], 
):
    '''
    Extract fixed-size windows around every keypoint of merged img-point
    dicts and write them as a geojson.

    Param:
    -----
    cities: dataset directory per img2pt entry (duplicates allowed); the
        order must match img2pt
    img2pt/img2pt_json: image-to-points dict(s) or their json path(s)
    save_dir: geojson output path ('' skips writing)
    win_size: window size in pixels (width, height)
    modals: modalities for which a window row is emitted per point

    Return:
    -----
    the GeoDataFrame of windows (added for parity with
    img2pt_to_win_gjson, which already returns it)
    '''
    if img2pt:
        if not isinstance(img2pt, list):
            img2pt = [img2pt]
    else:
        if not isinstance(img2pt_json, list):
            img2pt_json = [img2pt_json]
        img2pt = []
        for json_ in img2pt_json:
            img2pt.append(read_json(json_))

    if len(cities) != len(img2pt):
        raise ValueError("city should have the same len with img2pt")

    win_list = []
    for dict_, city in zip(img2pt, cities):
        for info, pt_info in dict_.items():
            for pt in pt_info.values():
                for m in modals:

                    # pixel window centred on the keypoint (rows = y)
                    row_start = pt['xy'][1] - win_size[1] // 2
                    row_stop = pt['xy'][1] + win_size[1] // 2
                    col_start = pt['xy'][0] - win_size[0] // 2
                    col_stop = pt['xy'][0] + win_size[0] // 2
                    
                    tmp_dict = {
                        "wkt": str(Point(pt['word_xy'][0], pt['word_xy'][1])),
                        "window": f"(({row_start}, {row_stop}), ({col_start}, {col_stop}))",
                        "x": pt['xy'][0], 
                        "y": pt['xy'][1], 
                    }
                    
                    # rebuild the image path of this modality under the city dir
                    tif_path = FillNameID(
                        fileid=[info], type_=m, 
                        dir_name=city
                    ).name[0]
                    tmp_dict['path'] = tif_path
                    win_list.append(tmp_dict)
                    
    df = pd.DataFrame(win_list)
    df["geometry"] = geopandas.points_from_xy(df['x'], df['y'])
    del df['x'], df['y']
    gdf = geopandas.GeoDataFrame(df, geometry='geometry')

    if save_dir != '':
        gdf.to_file(save_dir, driver='GeoJSON')

    # removed the dead trailing `pass`; return the frame like the
    # non-merged variant does
    return gdf

def divide_img2pt(
    img2pt: dict=None, img2pt_json: str=None, *, save_path: str=''
) -> list:
    '''
    Split one img2pt dict into per-modality img2pt dicts.

    Param:
    -----
    img2pt/img2pt_json: the img-point dict or the json path holding it
    save_path: when non-empty, each modality dict is written to
        <stem>_<modal><suffix> next to it

    Return:
    -----
    list of img2pt dicts, one per modality in order of first appearance
    '''
    if img2pt is None:
        img2pt = read_json(img2pt_json)

    modal_index = {}
    per_modal = []
    for info, pt_info in img2pt.items():
        for pt, pt_dict in pt_info.items():
            modal = pt_dict["modal"]
            if modal not in modal_index:
                # first time this modality appears: open a new output dict
                modal_index[modal] = len(per_modal)
                per_modal.append({})
            target = per_modal[modal_index[modal]]
            target.setdefault(info, {})[pt] = pt_dict

    if save_path != '':
        folder = os.path.dirname(save_path)
        stem, suffix = os.path.splitext(os.path.basename(save_path))
        for dict_, modal in zip(per_modal, modal_index.keys()):
            out_path = os.path.join(folder, stem + f'_{modal}' + suffix)
            with open(out_path, 'w') as fp:
                json.dump(dict_, fp, indent='\t', sort_keys=True)

    return per_modal

def open_spn_as_np(
    src_file, *, window=None, type='RGB', modal='', dset_obj=False, resolution=1
):
    '''
    Open a SpaceNet tif, warp it to the target resolution and return it as a
    uint8 numpy image.

    Param:
    -----
    src_file: path of the source tif
    window: unused, kept for interface compatibility
    type: image type ('RGB'/'NIR'/'SAR'); selects the band slice and the
        normalization range
    modal: modal name; only 'PS-RGB' switches the RGB range to [0, 256)
    dset_obj: if True, additionally return the opened WarpedVRT; the caller
        then owns it and is responsible for closing it
    resolution: target resolution passed to calculate_default_transform

    Return:
    -----
    img_np: (H, W, C) uint8 array; (img_np, vrt) when dset_obj is True

    Raise:
    -----
    ValueError: when type is not one of 'RGB'/'NIR'/'SAR'
    '''
    if type not in ['RGB', 'NIR', 'SAR']:
        raise ValueError(f"type: {type} not in ['RGB', 'NIR', 'SAR'] !")
    if type == 'RGB':
        if modal == 'PS-RGB':
            min_ = np.array([0, 0, 0])
            max_ = np.array([256., 256., 256.])
            scale = 256
            band = slice(2, None, -1)  # bands (2, 1, 0) — reversed first three
        else:
            min_ = np.array([0, 0, 0])
            max_ = np.array([2048, 2048, 2048])
            scale = 256
            band = slice(2, None, -1)
    elif type == 'NIR':
        min_ = np.array([0])
        max_ = np.array([2048])
        scale = 256
        band = slice(3, None)  # 4th band onward — presumably NIR; TODO confirm
    elif type == 'SAR':
        min_ = np.array([0])
        # per-band maxima for the 4 SAR intensity bands
        max_ = np.array([93., 92., 92., 92.])
        scale = 256
        band = slice(0, None)

    src = rasterio.open(src_file)
    dst_transform, dst_width, dst_height = calculate_default_transform(
        src.crs, src.crs, src.width, src.height, *src.bounds,
        resolution=(resolution, resolution))
    vrt_opts = {
        'resampling': Resampling.nearest,
        'crs': "EPSG:{}".format(src.crs.to_epsg()),
        'transform': dst_transform,
        'height': dst_height,
        'width': dst_width,
        }
    vrt = WarpedVRT(src, **vrt_opts)

    img_np = vrt.read()[band]
    img_np = np.transpose(img_np, [1, 2, 0])  # CHW -> HWC

    # clip to [min_, max_], normalize to [0, 1], then rescale to uint8
    img_np = (img_np.clip(*(min_, max_)) - min_) / (max_ - min_)
    img_np = (img_np * scale).astype(np.uint8)

    if dset_obj:
        return img_np, vrt
    # fix: close the VRT and the underlying dataset when they are not handed
    # back to the caller (previously both leaked)
    vrt.close()
    src.close()
    return img_np

def kpt2mark(shape, kpt, mark_type='.'):
    '''
    Expand keypoint coordinates into marker pixel coordinates.

    Param:
    -----
    shape: (width, height) bound used to clip marker pixels into the image
    kpt: (N, 2) array of (x, y) keypoint coordinates
    mark_type: '.' keeps the points unchanged; 'square' replaces each point by
        the 40 pixels of a hollow square of Chebyshev radius 5 around it

    Return:
    -----
    (M, 2) array of marker coordinates ('.' returns kpt as-is)

    Raise:
    -----
    ValueError: for an unknown mark_type
    '''
    if mark_type == '.':
        return kpt
    if mark_type != 'square':
        # fix: previously fell through with mark_pt unbound -> UnboundLocalError
        raise ValueError(f"mark_type: {mark_type} not in ['.', 'square'] !")

    radius = 5
    mark_num = 8 * radius  # pixels on the square perimeter
    mark_pt = kpt[:, np.newaxis, :]
    mark_pt = np.tile(mark_pt, (1, mark_num, 1))
    # the four corners, each starting one edge of the square
    ends = np.array([
        [-radius, -radius],
        [radius, -radius],
        [radius, radius],
        [-radius, radius]
        ])
    addend = np.array(range(0, 2*radius), dtype=np.int32)
    addend = np.stack((addend, np.zeros_like(addend)), axis=1)
    point_shift = np.empty((0, 2))
    for i in ends:
        # walk along x or y depending on which edge this corner starts
        if i[0] * i[1] < 0:
            tmp_shift = addend[:, 1::-1]
        else:
            tmp_shift = addend
        if i[1] > 0:
            tmp_shift = -tmp_shift
        tmp_shift = tmp_shift + i
        point_shift = np.append(point_shift, tmp_shift, axis=0)
    mark_pt = mark_pt + point_shift
    mark_pt = mark_pt.reshape(-1, mark_pt.shape[2])
    # keep every marker pixel inside the image bounds
    mark_pt = mark_pt.clip([0, 0], [shape[0]-1, shape[1]-1])

    return mark_pt

def dict_to_RGB_plotimg(img_dict, save_dir, dset_dir, *, 
    img_type='RGB', ch_list=None, color=(255, 0, 0), mark_type='.', 
    save_kw='', modal=''
):
    '''
    Plot each image of an img2pt dict with its keypoints marked, one png per
    image written into save_dir.

    Param:
    ----
    img_dict: img2pt dict {img_info: {pt_id: pt_dict with "xy"}}
    save_dir: directory the marked pngs are saved to (created if absent)
    dset_dir: dataset path, one level above the modal folders
    img_type: type of the plotted image ('RGB'/'NIR'/'SAR')
    ch_list: channel indices; defaults depend on img_type; length must be 1 or 3
    color: marker color, channel count must match ch_list
    mark_type: marker type, same as the kpt2mark parameter of the same name
    save_kw: prefix prepended to the saved file names
    modal: modal name ('SAR-Intensity'/'MS'/'PS-RGB'/)

    Raise:
    ----
    ValueError: bad ch_list length or unknown img_type
    '''
    # fix: os.mkdir fails when parent dirs are missing and races with other
    # writers; makedirs(exist_ok=True) handles both
    os.makedirs(save_dir, exist_ok=True)

    if img_type == 'RGB':
        if ch_list is None:
            ch_list = [0, 1, 2]
        elif len(ch_list) != 1 and len(ch_list) != 3:
            raise ValueError(f"length of ch_list: {ch_list} should be 1 or 3")
        file_modal = 'MS' if modal == '' else modal
    elif img_type == 'NIR':
        if ch_list is None:
            ch_list = [0]
        elif len(ch_list) != 1:
            raise ValueError(f"length of ch_list: {ch_list} should be 1")
        file_modal = 'MS'
    elif img_type == 'SAR':
        if ch_list is None:
            ch_list = [2, 1, 3]
        elif len(ch_list) != 1 and len(ch_list) != 3:
            raise ValueError(f"length of ch_list: {ch_list} should be 1 or 3")
        file_modal = 'SAR-Intensity'
    else:
        # fix: previously fell through with file_modal unbound -> NameError
        raise ValueError(f"img_type: {img_type} not in ['RGB', 'NIR', 'SAR'] !")

    # fix: empty img_dict previously crashed on img_info_list[0]
    if not img_dict:
        return

    # string keys are converted to tuples, then image info becomes file paths
    img_info_list = list(img_dict.keys())
    if not isinstance(img_info_list[0], tuple):
        img_info_list = [str_to_tuple(i) for i in img_info_list]
    file_list = FillNameID(
        fileid=img_info_list, type_=file_modal, dir_name=dset_dir).name

    for file_, points in zip(file_list, img_dict.values()):
        background = open_spn_as_np(file_, type=img_type, modal=modal)
        background = background[:, :, ch_list]
        if img_type == 'NIR':
            # replicate the single channel so a 3-channel marker color works
            background = np.tile(background, (1, 1, 3))

        # gather all keypoint coordinates of this image into an (N, 2) array
        kpt_coord = np.empty((0, 2), dtype=np.int32)
        for pt_attri in points.values():
            kpt_coord = np.append(kpt_coord, 
                np.array(pt_attri["xy"]).reshape(1, -1), axis=0)

        if kpt_coord.shape[0] > 0:
            # kpt2mark clips to (width, height), hence shape[1::-1]
            mark_coord = kpt2mark(
                background.shape[1::-1], kpt_coord, mark_type=mark_type)
            mark_coord = mark_coord.astype(np.int32)
            background[mark_coord[:, 1], mark_coord[:, 0]] = color

        showimg_name = os.path.basename(file_)
        showimg_name = save_kw + '_' + os.path.splitext(showimg_name)[0] + \
            ".png"
        plt.imsave(os.path.join(save_dir, showimg_name), background)

if __name__ == '__main__':

    # # test ChangePointDict._img2point()
    # cpd = ChangePointDict('img', 
    #     json_dir='E:/workspace/SOMatch/preprocess/results/subset_SAR_HARRIS.json', id_key='HARRIS-SAR-')
    # cpd.to_json('E:/workspace/SOMatch/preprocess/results/subset_SAR_HARRIS_id2pt.json')

    # test ChangePointDict.point2img()
    # cpd = ChangePointDict('pt', 
    #     json_dir='E:/workspace/SOMatch/preprocess/results/points.json')
    # cpd.to_json('E:/workspace/SOMatch/preprocess/results/test_img2info.json')

    # test dict_to_RGB_plotimg
    modals = ['PS-RGB', 'SAR-Intensity']
    types = ['RGB', 'SAR']
    # prefix = "no_nms_0_16+5e-3"
    prefix = "63nms"

    # NOTE: loop variables renamed so they no longer shadow the builtin `type`
    for modal_name, img_type in zip(modals, types):
        img2dict = read_json(
            f"./preprocess/subthresh/thresh_HARRIS_divided_{modal_name}.json")

        dict_to_RGB_plotimg(img2dict,
            f"E:/workspace/SOMatch/preprocess/subthresh/{modal_name}/{prefix}", 
            "E:/datasets/spacenet-dataset/spacenet/SN6_buildings/train/AOI_11_Rotterdam/", 
            img_type=img_type, color=[255, 0, 0], mark_type='square', 
            save_kw=prefix, modal=modal_name
        )

    # # test merge_img2pt_chosen_pt
    # merge_img2pt_chosen_pt(
    # ["E:/workspace/SOMatch/preprocess/results/bb_/test_HARRIS_divided_PS-RGB.json",
    # "E:/workspace/SOMatch/preprocess/results/bb_/test_HARRIS_divided_SAR-Intensity.json"],
    # save_path="E:/workspace/SOMatch/preprocess/results/subset_HARRIS_merge.json",
    # chosen_func=chosen_func_nms)

    # # test merged_img2pt_to_geojson
    # merged_img2pt_to_geojson(
    #     img2pt_json=[
    #         "E:/workspace/SOMatch/preprocess/results/subset_SAR_HARRIS_chosen.json",
    #         "E:/workspace/SOMatch/preprocess/results/subset_PS-RGB_HARRIS_chosen.json"
    #     ],
    #     save_dir="E:/workspace/SOMatch/preprocess/results/subset_chosen.geojson",
    #     dset_dir=[
    #         "AOI_11_Rotterdam", 
    #         "AOI_11_Rotterdam",
    #     ]
    # )

    # # test merged_img2pt_to_win_gjson
    # merged_img2pt_to_win_gjson(
    #     ['AOI_11_Rotterdam', 'AOI_11_Rotterdam'], 
    #     img2pt_json=[
    #         "E:/workspace/SOMatch/preprocess/results/subset_SAR_HARRIS_chosen.json",
    #         "E:/workspace/SOMatch/preprocess/results/subset_PS-RGB_HARRIS_chosen.json"
    #     ], 
    #     save_dir="E:/workspace/SOMatch/preprocess/results/subset_chosen_win.geojson", 
    #     win_size=(320, 320), modals=['SAR-Intensity', 'PS-RGB'], 
    # )

    # # test divide_img2pt
    # divide_img2pt(
    #     img2pt_json="E:/workspace/SOMatch/preprocess/results/subset_HARRIS_merge.json", save_path="E:/workspace/SOMatch/preprocess/results/subset_merge.json"
    # )

    # # test SpnDsetPath
    # sdp = SpnDsetPath(dset_dir="E:/datasets/spacenet-dataset/spacenet/SN6_buildings/train/AOI_11_Rotterdam", fold_list=['PS-RGB', 'SAR-Intensity'])
    # # sdp.get_subset(fold_list=['PS-RGB', 'SAR-Intensity'], num=10)
    # # print(sdp.subset['PS-RGB'])
    # sdp.split_train_valid_test(
    #     rate=[0.05, 0.01, 0], save_path="E:/workspace/SOMatch/preprocess/results/bb_/test.json"
    # )

    # # merge json
    # merge_subset([
    #     "E:/workspace/SOMatch/preprocess/results/bb_/test_test.json",
    #     "E:/workspace/SOMatch/preprocess/results/bb_/test_train.json",
    #     "E:/workspace/SOMatch/preprocess/results/bb_/test_valid.json",
    #     ], 
    #     save_path="E:/workspace/SOMatch/preprocess/results/bb_/total_set.json"
    # )
