import preprocess.Data2Feature as D2F
import preprocess.label_encode as Encode
import operator
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.metrics import accuracy_score

# Identifier for this classifier; presumably read by calling code to
# select/label this model — verify against the importing module.
NAME = 'LGBM'

class Fit:
    """LightGBM text classifier.

    Trains a multiclass LightGBM model on n-gram features extracted from
    website URLs and/or titles, then predicts categories for the test set.
    The decoded predictions are stored in ``self.predictions``.

    For more information about the LGBM algorithm see
    https://github.com/Microsoft/LightGBM

    Input:  X_train, Y_train, X_test, Y_test, CutMethod
    Output: predictions of X_test

    Reference results:
        CutMethod == 'char' : Test Set Accuracy: 0.836420
        CutMethod == 'parse': Test Set Accuracy: 0.810700
    """

    # ---- feature-extraction parameters ------------------------------------
    N1 = {'char': 2, 'parse': 3}  # start of the n-gram range, per cut method
    N2 = {'char': 2, 'parse': 4}  # end of the n-gram range, per cut method
    Min_Web = 2        # minimum corpus frequency for a website (url) feature
    Min_Title = 2      # minimum corpus frequency for a title feature
    Title = True       # use title information to extract features
    Website = True     # use website (url) information to extract features
    Add = False        # True: use per-sample feature counts; False: 0/1 indicators
    Stopwords = False  # True: remove stopwords such as punctuation

    def __init__(self, X_train, Y_train, X_test, Y_test, CutMethod):
        # Train immediately; keep only the decoded test-set predictions.
        self.predictions = self.fit(X_train, Y_train, X_test, Y_test, CutMethod)

    @staticmethod
    def set_params(Y_train, Y_test):
        """Build the LightGBM training-parameter dict.

        ``num_class`` is derived from the union of train and test labels so
        every category seen anywhere in the data gets an output slot.

        For parameter details see
        https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst
        """
        label_size = len(np.unique(np.hstack((Y_train, Y_test))))
        params = {
            'boosting_type': 'dart',         # Dropouts meet Multiple Additive Regression Trees
            'objective': 'multiclass',
            'metric': 'multi_error',
            'num_class': label_size,
            'max_bin': 255,                  # max number of bins feature values are bucketed in
            'learning_rate': 0.4,            # shrinkage rate
            'num_leaves': 2 ** 4 - 1,        # number of leaves in one tree
            'min_data_in_leaf': 3,           # minimal number of data in one leaf
            'min_sum_hessian_in_leaf': 0.1,  # minimal sum hessian in one leaf
            'is_unbalance': 'true',          # unbalanced dataset
            'verbose': -1,
        }
        return params

    def fit(self, X_train, Y_train, X_test, Y_test, CutMethod):
        """Train on the train split, print test-set accuracy, and return the
        decoded (string-label) predictions for ``X_test``.
        """
        X = {'train': X_train, 'test': X_test}
        Y = {'train': Y_train, 'test': Y_test}

        # extract n-gram features from websites and titles
        X_features = D2F.Data2Feature(X, self.N1[CutMethod], self.N2[CutMethod],
                                      website=self.Website,
                                      title=self.Title,
                                      cutmethod=CutMethod,
                                      add=self.Add,
                                      stopwords=self.Stopwords,
                                      min_web=self.Min_Web,
                                      min_title=self.Min_Title).features

        # encode string labels (e.g. 'Art') to integers (e.g. 1)
        [Train_feature, Test_feature, Train_category, Test_category, le] = \
            Encode.refomat(X_features, Y)

        Data_Train = lgb.Dataset(Train_feature, Train_category)

        # fit the classifier
        # NOTE: `verbose_eval` was removed in LightGBM 4.x; the
        # `log_evaluation` callback (available since 3.3) is the
        # forward-compatible way to log evaluation every 20 rounds.
        params = Fit.set_params(Y_train, Y_test)
        model = lgb.train(params,
                          train_set=Data_Train,
                          num_boost_round=100,
                          valid_sets=[Data_Train],
                          callbacks=[lgb.log_evaluation(period=20)])
        p_test = model.predict(Test_feature)  # probability of each category
        p_test = np.argmax(p_test, axis=1)    # choose the category with maximum probability

        # decode integer predictions back to the original string labels
        Y_test_prediction = le.inverse_transform(p_test)
        # accuracy_score's conventional order is (y_true, y_pred); exact-match
        # accuracy is symmetric, so the printed value is unchanged.
        print('LGBM-%s: Test Set Accuracy: %f' % (CutMethod, accuracy_score(Test_category, p_test)))
        return Y_test_prediction