# -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 14:47:14 2020

@author: Farman
"""


import numpy as np
import joblib
import sys
import pathlib


def get_train_dataset(scene, truth, sample_points):
    '''
    Construct dataset for svm/svc training.

    Parameters
    ----------
    scene : numpy array
        Contains pixel data, shape (height, width, bands).

    truth : numpy array
        Ground truth data, shape (height, width).

    sample_points : integer
        Train dataset sample points per class. Classes with fewer
        pixels than this contribute all of their pixels.

    Returns
    -------
    X : numpy array, dtype float
        Sampled and serialized scene data, shape (n_samples, bands).
    y : numpy array, dtype float
        Sampled and serialized ground truth data, shape (n_samples,).
    bands : int
        Bands of input, for predict dataset preparing.
    '''
    bands = scene.shape[2]

    # Serialize in column-major order (x outer, y inner) — vectorized
    # equivalent of iterating `for x in range(w) for y in range(h)`,
    # which other stages of the pipeline rely on.
    X_all = scene.transpose(1, 0, 2).reshape(-1, bands)
    y_all = truth.T.reshape(-1)

    # Class labels are assumed to be 0..max contiguous.
    n_classes = int(y_all.max()) + 1

    # Value distribution of y, for debug purposes.
    #--------------------------------------------
    print('\nIn origin data:')
    print('truth_value\tPixels')
    print('-' * 20)

    for n in range(n_classes):
        print('\t', n, '\t\t', int((y_all == n).sum()))

    # Split data by truth value, to prepare for per-class sampling.
    # The truth value of each class becomes its index in the list.
    #--------------------------------------------
    classified = [X_all[y_all == n] for n in range(n_classes)]

    print('\nClassified Data:')
    print('truth_value\tPixels')
    print('-' * 20)

    for n in range(n_classes):
        print('\t', n, '\t\t', len(classified[n]))

    # Pick the first `sample_points` pixels of each class to construct
    # the train dataset.
    #-------------------------------------------------------------------------
    data_num = sample_points
    print('\nPick %d pixels per class.'%data_num)

    picked = [sub_class[:data_num] for sub_class in classified]

    print('\nPicked Data:')
    print('truth_value\tPixels')
    print('-' * 20)

    # Build the truth vector matching the picked pixels.
    data_point = 0
    picked_y = []

    for n in range(n_classes):
        print('\t', n, '\t\t', len(picked[n]))
        data_point += len(picked[n])
        picked_y += [n] * len(picked[n])

    print('\nTotal data points picked X:', data_point)
    print('\nTotal data points picked y:', len(picked_y))

    # Serialize picked data.
    # NOTE: float data reportedly trains better than int data.
    #-------------------------------------------------------------------------
    X = np.concatenate(picked, axis=0).astype('float')
    y = np.array(picked_y, dtype='float')

    return X, y, bands



def prepare(scene_file, truth_file, sample_points):
    '''
    Prepare a training dataset from persisted data files.
    Samples sample_points points per class.

    Parameters
    ----------
    scene_file : string
        Name of scene file (joblib dump of a dict; the first value
        is taken as the scene array).

    truth_file : string
        Name of ground truth file (joblib dump of a dict; the first
        value is taken as the truth array).

    sample_points : integer
        Train dataset sample points per class.

    Returns
    -------
    X, y and bands_per_pixel.
    X is the input dataset for training,
    y is the truth value for training.
    bands_per_pixel is the bands of the data will be trained.
    '''
    sample_points = int(sample_points)

    # Each dump holds a one-entry mapping; unwrap the first value.
    scene = next(iter(joblib.load(scene_file).values()))
    truth = next(iter(joblib.load(truth_file).values()))

    return get_train_dataset(scene, truth, sample_points)


def prepare_data_file(scene_file, truth_file, sample_points):
    '''
    Build a training dataset and persist it beside the scene file.

    Parameters
    ----------
    scene_file : string
        Name of scene file.

    truth_file : string
        Name of ground truth file.

    sample_points : integer
        Train dataset sample points per class.

    Returns
    -------
    None. Writes two persistence files into the scene file's directory:
        'Xy.joblib'    -- the (X, y) training pair.
        'bands.joblib' -- the band count.
    '''
    sample_points = int(sample_points)
    out_dir = pathlib.Path(scene_file).parent

    X, y, bands = prepare(scene_file, truth_file, sample_points)

    joblib.dump((X, y), out_dir / 'Xy.joblib')
    joblib.dump(bands, out_dir / 'bands.joblib')

'''
Example (interactive use):
scene_file = r'D:\Pavia\Pavia\PaviaUniversity\PaviaU.joblib'
truth_file = r'D:\Pavia\Pavia\PaviaUniversity\PaviaU_gt.joblib'
X, y, bands = prepare(scene_file, truth_file, 200)
'''

if __name__ == '__main__':
    # Interactive mode: prompt for all three inputs.
    if len(sys.argv) == 1:
        scene_file = input('Scene file : ')
        truth_file = input('Truth file : ')
        sample_points = input('Sample points : ')

        if len(scene_file) and len(truth_file) and len(sample_points):
            prepare_data_file(scene_file, truth_file, sample_points)
            print('Job done.')
        else:
            print('Usage:')
            print('    python3 svc0dataset.py scene_file truth_file sample_points')

    # Command-line mode. BUG FIX: the original accepted exactly three
    # argv entries and called prepare_data_file() without the required
    # sample_points argument, raising TypeError; sample_points is now
    # read from argv as the usage text (above) already documented.
    elif len(sys.argv) == 4:
        scene_file = sys.argv[1]
        truth_file = sys.argv[2]
        sample_points = sys.argv[3]
        prepare_data_file(scene_file, truth_file, sample_points)
        print('Job done.')

    else:
        print('Usage:')
        print('    python3 svc0dataset.py')
        print('or')
        print('    python3 svc0dataset.py scene_file truth_file sample_points')