from argparse import ArgumentParser

from torch.utils.data import Dataset


class PEDataset(Dataset):
    """Thin map-style Dataset over a pre-built list of samples.

    Samples are returned exactly as stored; no transform is applied and
    the stored device string is only kept for use by downstream code.
    """

    def __init__(self, data_list, device):
        # Keep references; no copying of the sample list.
        self.data_list = data_list
        self.device = device

    def __len__(self):
        # Dataset size equals the number of stored samples.
        return len(self.data_list)

    def __getitem__(self, index):
        # Return the raw sample at `index` unchanged.
        return self.data_list[index]

def get_extent(pgon_gdf, geom_type_list):
    """Map each geometry type to its spatial extent (x_min, x_max, y_min, y_max)."""
    return {
        geom_type: get_extent_by_geom_type(pgon_gdf, geom_type=geom_type)
        for geom_type in geom_type_list
    }

def make_periodXY(extent):
    '''
    Make periodXY based on the spatial extent
    Args:
        extent: (minx, maxx, miny, maxy)
    Return:
        periodXY: t in DDSL_spec(), [periodX, periodY]
            periodX, periodY: the spatial extend from [0, periodX]
    '''
    minx, maxx, miny, maxy = extent
    # Period along each axis is simply the extent's width/height.
    return [maxx - minx, maxy - miny]

def get_extent_by_geom_type(pgon_gdf, geom_type):
    """Return the spatial extent (x_min, x_max, y_min, y_max) for one geometry type.

    Args:
        pgon_gdf: GeoDataFrame-like object; only its `total_bounds` is read,
            and only when geom_type == "origin".
        geom_type: "norm" for polygons normalized into the unit box,
            "origin" for the original polygon coordinates.

    Raises:
        ValueError: for an unrecognized geom_type. (The original fell off the
            end and silently returned None, which hid typos in --geom_type_list.)
    """
    if geom_type == "norm":
        # Normalized polygons live in a fixed (-1, 1) box on both axes.
        return (-1, 1, -1, 1)
    elif geom_type == "origin":
        # total_bounds is (minx, miny, maxx, maxy); reorder to the
        # (x_min, x_max, y_min, y_max) convention used elsewhere.
        minx, miny, maxx, maxy = list(pgon_gdf.total_bounds)
        return (minx, maxx, miny, maxy)
    raise ValueError(f"Unknown geom_type: {geom_type!r}; expected 'norm' or 'origin'")

def make_args_parser():
    """Build and return the ArgumentParser for polygon-encoder experiments.

    Groups: directories, data, model type, encoder (polygon/space/rbf/ffn),
    decoder, data augmentation, training, evaluation, checkpointing, device.
    Note: several flags use the string values "T"/"F" instead of booleans;
    this matches the downstream parsing convention and is kept as-is.
    """
    parser = ArgumentParser()
    # dir
    parser.add_argument("--data_dir", type=str, default="output/")
    parser.add_argument("--model_dir", type=str, default="model_dir/")
    parser.add_argument("--log_dir", type=str, default="model_dir/")

    # data
    parser.add_argument("--pgon_filename", type=str, default="pgon_300_gdf_prj.pkl")
    parser.add_argument("--triple_filename", type=str, default="pgon_triples_geom_300_norm_df.pkl")
    parser.add_argument("--geom_type_list", nargs='+', type=str, default=["norm"],
                        help='''the type of geometry we need to consider:
                origin: the original polygon
                norm: the normalized polygn into (-1, 1, -1, 1)''')
    parser.add_argument("--data_split_num", type=int, default=0,
                        help='''we might do multiple train/valid/test split, 
        this indicate which split we will use to train
        Note that we use 1, 0, -1 to indicate train/test/valid
            1: train
            0: test
            -1: valid ''')
    parser.add_argument("--num_worker", type=int, default=0,
                        help='the number of worker for dataloader')
    parser.add_argument("--num_vert", type=int, default=300,
                        help='the number of unique vertices of one polygon')

    # model type
    parser.add_argument("--task", nargs='+', type=str, default=["rel"],
                        help='''the task 
        rel: spatial relation prediction, need to use noralized geometry of subject and object
        obj: object entity prediction (similar to link prediction), need to use original geometry
        sub: object entity prediction (similar to link prediction), need to use original geometry
        ''')
    parser.add_argument("--model_type", type=str, default="cat",
                        help='''the type of model we use, 
        cat: concat two polygon embedding
        imgcat: concat two polygon images
        ''')

    # model
    # parser.add_argument("--embed_dim", type=int, default=64,
    #     help='Point feature embedding dim')
    parser.add_argument("--dropout", type=float, default=0.1,
                        help='The dropout rate used in all fully connected layer')
    parser.add_argument("--act", type=str, default='relu',
                        help='the activation function for the encoder decoder')

    # # encoder decoder
    # parser.add_argument("--join_dec_type", type=str, default='max',
    #     help='the type of join_dec, min/max/mean/cat')

    # polygon encoder
    parser.add_argument("--pgon_enc", type=str, default="nuft_ddsl",
                        help='''the type of polygon encoder:
                resnet: ResNet based encoder
                veercnn: the CNN model proposed in https://arxiv.org/pdf/1806.03857.pdf
                nuft_ddsl: the NUDF DDSL model in the spectural domain
                nuftifft_ddsl: the NUDF + IFFT + LeNet5 mode in https://arxiv.org/pdf/1901.11082.pdf
                nuftifft_mlp: NUFT + IFFT + MLP
                ''')
    parser.add_argument("--nuft_pca_dim", type=int, default=64,
                        help='the number of pca component we want to keep')
    parser.add_argument("--nuft_pca_white", type=str, default="nw",
                        help='''nw: PCA (whiten = False)
                wb: PCA (whiten = True)''')
    parser.add_argument("--pgon_embed_dim", type=int, default=512,
                        help='the embedding dimention of polygon')
    parser.add_argument("--padding_mode", type=str, default="circular",
                        help='the type of padding method for Conv1D: circular / zeros / reflect / replicate')
    parser.add_argument("--resnet_add_middle_pool", type=str, default='F',
                        help='whether to add MaxPool1D between the middle layers of ResNet')
    parser.add_argument("--resnet_fl_pool_type", type=str, default="mean",
                        help='''the type of final pooling method: 
                mean / min /max:
                atten_[att_type]_[bn]_[nat]:
                    att_type: the type of attention
                        whole: we combine embeddings with a scalar attention coefficient
                        ele: we combine embedding with a vector attention coefficient
                    bn: the type of batch noralization type
                        no: no batch norm
                        before: batch norm before ReLU
                        after:  batch norm after ReLU
                    nat: scalar = [1,2,3], the number of attention matrix we want to go through before atten_mats2''')
    parser.add_argument("--resnet_block_type", type=str, default="basic",
                        help='the type of ResNet block we will use: basic / bottleneck')
    parser.add_argument("--resnet_layers_per_block", nargs='+', type=int, default=[],
                        help='the number of layers per resnet block, ')
    # nuft args
    parser.add_argument("--nuft_freqXY", nargs='+', type=int, default=[32, 32],
                        help='the number of frequency used for each spatial dimenstion, must be 2 -> [fx, fy]')
    parser.add_argument("--nuft_max_freqXY", type=float, default=8,
                        help='the max frequency we use for NUFT Fourier frequency')
    parser.add_argument("--nuft_min_freqXY", type=float, default=1,
                        help='the min frequency we use for NUFT Fourier frequency')
    parser.add_argument("--nuft_mid_freqXY", type=float, default=4,
                        help='the middle frequency we use for NUFT Fourier frequency')
    parser.add_argument("--nuft_freq_init", type=str, default="fft",
                        help='''the frequency initilization method we use for NUFT Fourier frequency
                "geometric": geometric series
                "fft": fast fourier transformation
        ''')
    parser.add_argument("--j", type=int, default=2,
                        help='the j-simplex dimention we consider')
    # parser.add_argument("--pgon_nuft_embed_norm", type=str, default='F',
    #     help='whether to normalize the polygon NUFT resulting embedding before ffn')
    parser.add_argument("--pgon_nuft_embed_norm_type", type=str, default='none',
                        help='''the type of normalization for the polygon NUFT resulting embedding before ffn
        none: no norm
        l2: l2 norm
        bn: batch norm 
        ''')
    parser.add_argument("--spec_pool_max_freqXY", nargs='+', type=int, default=[16, 16],
                        help='''the maximum number of spectural pooling frequency 
            used for each spectural dimenstion, must be 2 -> [fx, fy]''')
    parser.add_argument("--spec_pool_min_freqXY_ratio", type=float, default=0.3,
                        help='''The minimum freq ratio = min_fx/spec_pool_max_freqXY in spectual pooling, 
            https://arxiv.org/pdf/1506.03767.pdf''')

    # space encoder
    parser.add_argument("--spa_enc", type=str, default="kdelta",
                        help='the type of spatial encoder, none/naive/gridcell/hexagridcell/theory/theorydiag')
    parser.add_argument("--spa_embed_dim", type=int, default=26,
                        help='Point Spatial relation embedding dim')
    parser.add_argument("--freq", type=int, default=16,
                        help='The number of frequency used in the space encoder')
    parser.add_argument("--max_radius", type=float, default=2,
                        help='The maximum spatial context radius in the space encoder')
    parser.add_argument("--min_radius", type=float, default=1e-6,
                        help='The minimum spatial context radius in the space encoder')
    parser.add_argument("--spa_f_act", type=str, default='relu',
                        help='The final activation function used by spatial relation encoder')
    parser.add_argument("--freq_init", type=str, default='geometric',
                        help='The frequency list initialization method')
    # parser.add_argument("--spa_enc_use_layn", type=str, default='F',
    #     help='whether to use layer normalzation in spa_enc')
    parser.add_argument("--spa_enc_use_postmat", type=str, default='F',
                        help='whether to use post matrix in spa_enc')

    # rbf
    parser.add_argument("--num_rbf_anchor_pts", type=int, default=100,
                        help='The number of RBF anchor points used in the "rbf" space encoder')
    parser.add_argument("--rbf_kernal_size", type=float, default=10e2,
                        help='The RBF kernal size in the "rbf" space encoder')
    parser.add_argument("--rbf_kernal_size_ratio", type=float, default=0,
                        help='The RBF kernal size ratio in the relative "rbf" space encoder')

    parser.add_argument("--k_delta", type=int, default=12,
                        help='The number of (deltaX, deltaY) used in the "kdelta" space encoder')

    # ffn
    parser.add_argument("--ffn_type", type=str, default="ffn",
                        help='''ffn:  use MultiLayerFeedForwardNN()
                ffnf: use MultiLayerFeedForwardNNFlexible()''')
    parser.add_argument("--ffn_hidden_layers", nargs='+', type=int, default=[],
                        help='a list of hidden dimention in MultiLayerFeedForwardNNFlexible in the polygon encoder')
    parser.add_argument("--num_hidden_layer", type=int, default=1,
                        help='The number of hidden layer in feedforward NN in the (global) space encoder')
    parser.add_argument("--hidden_dim", type=int, default=512,
                        help='The hidden dimention in feedforward NN in the (global) space encoder')
    parser.add_argument("--use_layn", type=str, default="T",
                        help='use layer normalization or not in feedforward NN in the (global) space encoder')
    parser.add_argument("--skip_connection", type=str, default="T",
                        help='skip connection or not in feedforward NN in the (global) space encoder')

    # polygon decoder
    parser.add_argument("--pgon_dec", type=str, default="explicit_conv",
                        help='''the type of polygon decoder to do unsupervised learning, 
            explicit_mlp: elementwise mlp for each grid point
            explicit_conv: 3 layers of convolution network
            ''')
    parser.add_argument("--pgon_dec_grid_init", type=str, default="circle",
                        help='''We generate a list of grid points for polygon decoder, the type of grid points are:
            uniform: points uniformly sampled from (-1, 1, -1, 1) 
            circle: points sampled equal-distance on a circle whose radius is randomly sampled
            kdgrid: k-d regular grid
            ''')
    parser.add_argument("--pgon_dec_grid_enc_type", type=str, default="spa_enc",
                        help='''the type to encode the grid point
            none: no encoding, use the original grid point
            spa_enc: use space encoder to encode grid point before
            ''')

    parser.add_argument("--grt_loss_func", type=str, default="LOOPL2",
                        help='''The generative loss function:
            L2: L2 distance between two corresponding points
            NN: nearest neighbor loss as Equation 1 in https://arxiv.org/pdf/1712.07262.pdf
            LOOPL2: Loop L2 distance
            ''')

    parser.add_argument("--do_weight_norm", type=str, default="F",
                        help="whether we use a weight normlized linear layer for POlygn Classification")

    parser.add_argument("--do_polygon_random_start", type=str, default="T",
                        help="whether we random pick a point on polygon to start the training for POlygn Classification")

    parser.add_argument("--do_data_augment", type=str, default="F",
                        help="whether do polygon data argumentation, flip, rotate, scale polygons in each batch")
    # NOTE: help text below previously contained an editor artifact
    # ("argumSave file atentation"); restored to "argumentation".
    parser.add_argument("--do_online_data_augment", type=str, default="F",
                        help='''whether do online polygon data argumentation during training
            T: data augment in mini-batch
            F: data augment when loading the data
            ''')
    parser.add_argument("--data_augment_type", type=str, default="none",
                        help='''the type of data augmentation:
            none: no data argumentation
            flp: flip
            rot: rotate
            tra: translate
            scl: scale
            noi: add white noise to polygon
            ''')
    parser.add_argument("--num_augment", type=int, default=0,
                        help='The number of copies we will create we do when we do data argumentation')

    # train
    parser.add_argument("--opt", type=str, default="adam")
    parser.add_argument("--lr", type=float, default=0.01,
                        help='learning rate')

    parser.add_argument("--weight_decay", type=float, default=0.000,
                        help='weight decay of adam optimizer')
    parser.add_argument("--task_loss_weight", type=float, default=0.95,
                        help='the weight of classification loss when we do join training')
    parser.add_argument("--pgon_norm_reg_weight", type=float, default=0.02,
                        help='the weight of polygon embedding norm regularizer')

    parser.add_argument("--grt_epoches", type=int, default=0,
                        help='the maximum epoches for generative model converge')
    parser.add_argument("--cla_epoches", type=int, default=10,
                        help='the maximum epoches for polygon classifier model converge')
    parser.add_argument("--max_burn_in", type=int, default=5000,
                        help='the maximum iterator for relative/global model converge')
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--tol", type=float, default=0.000001)

    parser.add_argument("--balanced_train_loader", type=str, default="F",
                        help="whether we do BalancedSampler for polygon classification")

    parser.add_argument("--tb", type=str, default="T",
                        help="whether to log to tensorboard")

    # eval
    parser.add_argument("--log_every", type=int, default=100)
    parser.add_argument("--val_every", type=int, default=100)

    # load old model
    parser.add_argument("--load_model", action='store_true')

    # cuda
    parser.add_argument("--device", type=str, default="cuda:0")

    return parser