from os import path
#import time
import pprint as p
import copy

import pandas as pd
import numpy as np
import mglearn
from matplotlib import pyplot as plot

from sklearn.preprocessing import StandardScaler, PolynomialFeatures as Poly

from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.neighbors import KNeighborsClassifier as KNC
from sklearn.linear_model import LinearRegression as LinearReg, Ridge, Lasso
from sklearn.linear_model import LogisticRegression as LogisticReg
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import RandomForestClassifier as RFC, GradientBoostingClassifier as GBC
from sklearn.neural_network import MLPClassifier as MLPC

# Paths to the Kaggle Titanic CSVs, resolved relative to the working directory.
TRAIN_DATA_PATH = path.realpath('kaggle/titanic/train.csv')
TEST_DATA_PATH = path.realpath('kaggle/titanic/test.csv')
# Alternative paths for when the CSVs sit next to this script:
# TRAIN_DATA_PATH = path.realpath('train.csv')
# TEST_DATA_PATH = path.realpath('test.csv')
# Feature columns used for training; every column except 'Fare' is
# treated as categorical and one-hot encoded in SK.get_data().
TRAIN_KEYS = ('Parch', 'SibSp', 'Embarked', 'Fare', "Sex", "Pclass", "Age")
# Label column (0 = died, 1 = survived per the Kaggle data dictionary).
TAG_KEY = 'Survived'

class SK:
    """Titanic-survival experiments.

    Loads the Kaggle training CSV once and exposes one method per model
    family (k-NN, random forest, gradient boosting, linear models, MLP,
    RBF-kernel SVC).  ``train()`` runs the currently active experiment
    (``core`` → ``core_SVM``) several times and renders an averaged
    C × gamma cross-validation heat map.
    """

    def __init__(self):
        # Raw training frame, shared by every experiment.
        self.data = pd.read_csv(TRAIN_DATA_PATH)
        # Accumulated SVC cross-validation scores keyed [C][gamma];
        # summed across retries by core_SVM() and averaged in plot_heat().
        self.heat_map = {}

    def get_data(self, data_type=None):
        """Return a train/test split ``(xtr, xte, ytr, yte)``.

        All feature columns except 'Fare' are one-hot encoded.  When
        ``data_type == 'neural'`` the features are additionally
        standardized with a scaler fitted on the training split only,
        and returned as numpy arrays.
        """
        data_keys = list(TRAIN_KEYS)
        dummy_keys = [key for key in data_keys if key != 'Fare']
        features = pd.get_dummies(self.data.get(data_keys), columns=dummy_keys)
        tags = self.data.get(TAG_KEY)
        xtr, xte, ytr, yte = train_test_split(features, tags)
        if data_type == 'neural':
            # MLPs are scale-sensitive; fit the scaler on the train split
            # only to avoid leaking test statistics.
            scaler = StandardScaler().fit(xtr)
            return scaler.transform(xtr), scaler.transform(xte), ytr, yte
        return xtr, xte, ytr, yte

    def train_test_split(self, train_keys):
        """Split the raw (non-encoded) columns ``train_keys`` against the labels."""
        samples = np.array([self.data.get(key) for key in train_keys]).T
        tags = np.array(self.data.get(TAG_KEY))
        print(f'samples shape: {samples.shape}, tags shape: {tags.shape}')
        # Resolves to the module-level sklearn function, not this method.
        return train_test_split(samples, tags)

    def train(self):
        """Run the active experiment several times, then plot the averages."""
        # plot_heat() divides the accumulated scores by this count.
        self.retry_times = 5
        for _ in range(self.retry_times):
            self.core(*self.get_data())
        self.plot_heat()

    def knn(self, xtr, xte, ytr, yte):
        """Print train/test accuracy of k-NN for k = 1..9."""
        score_map = []
        for neighbor in range(1, 10):
            knn = KNC(n_neighbors=neighbor).fit(xtr, ytr)
            score_map.append((knn.score(xtr, ytr), knn.score(xte, yte),
                              'neighbors:%s' % neighbor))
        p.pprint({'knn': score_map})

    def random_integration(self, *args):
        """Dispatch to the active ensemble experiment."""
        self.random_gradient(*args)

    def random_forest(self, xtr, xte, ytr, yte):
        """Print train/test accuracy of random forests over several sizes."""
        score_map = []
        for estimator in (1, 3, 6, 10, 15, 21, 28):
            forest = RFC(n_estimators=estimator, max_depth=5).fit(xtr, ytr)
            score_map.append((forest.score(xtr, ytr), forest.score(xte, yte),
                              'estimators:%s' % estimator))
        p.pprint({'forest': score_map})

    def random_gradient(self, xtr, xte, ytr, yte):
        """Print averaged 10-fold CV scores of gradient boosting per learning rate."""
        learning_rate = [0.14]
        retry_times = 10
        # Recombine the split so cross-validation controls its own folds.
        samples = pd.concat([xtr, xte])
        tags = pd.concat([ytr, yte])
        score_map = []
        for _ in range(retry_times):
            score_tmp = []
            for rate in learning_rate:
                model = GBC(n_estimators=150, max_depth=3, learning_rate=rate)
                score = cross_val_score(model, samples, tags,
                                        cv=KFold(n_splits=10, shuffle=True))
                score_tmp.append(score.mean())
            score_map.append(score_tmp)
        score_np = np.array(score_map).T
        for index, rate in enumerate(learning_rate):
            print(f'gradient rate:{rate}, avg score with cross value:{sum(score_np[index])/retry_times}')

    def linear(self, xtr, xte, ytr, yte):
        """Dispatch to the active linear-model experiment."""
        self.linear_logistic_SVC(xtr, xte, ytr, yte)

    def linear_ridge(self, xtr, xte, ytr, yte):
        """Print train/test R^2 of ridge regression over several alphas."""
        ridge_score = []
        for alpha in (0.1, 0.5, 1, 5, 10):
            ridge = Ridge(alpha=alpha).fit(xtr, ytr)
            ridge_score.append((ridge.score(xtr, ytr), ridge.score(xte, yte),
                                'alpha:%s' % alpha))
        p.pprint({'ridge': ridge_score})

    def linear_lasso(self, xtr, xte, ytr, yte):
        """Print train/test R^2 of lasso over an alpha/max_iter grid."""
        lasso_score = []
        for iter_num in (3, 4, 5):
            for alpha in (0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1):
                lasso = Lasso(alpha=alpha, max_iter=pow(10, iter_num)).fit(xtr, ytr)
                lasso_score.append((lasso.score(xtr, ytr), lasso.score(xte, yte),
                                    'alpha:%s, iter_num:%s' % (alpha, iter_num)))
        p.pprint({'lasso': lasso_score})

    def linear_logistic_SVC(self, xtr, xte, ytr, yte):
        """Print averaged 10-fold CV scores of L1 LinearSVC over a grid of C."""
        max_iter = 100000
        retry_times = 5
        params_C = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
        # Recombine the split so cross-validation controls its own folds.
        samples = pd.concat([xtr, xte])
        tags = pd.concat([ytr, yte])
        svc_score = []
        print(f'max_iter: {max_iter}')
        for penalty in ('l1', ):
            for C in params_C:
                print(f'params: {C}')
                tmp_svc = []
                for _ in range(retry_times):
                    svc = LinearSVC(C=C, penalty=penalty, dual='auto',
                                    max_iter=max_iter)
                    print('svc')
                    tmp_svc.append(cross_val_score(
                        svc, samples, tags,
                        cv=KFold(n_splits=10, shuffle=True)).mean())
                svc_score.append((sum(tmp_svc) / retry_times,
                                  'type:%s, C:%s' % (penalty, C)))
        p.pprint({'SVC': svc_score})

    def neural(self, *args):
        """Dispatch to the active neural-network experiment."""
        self.neural_mlp(*args)

    def neural_mlp(self, *args):
        """Cross-validate MLPs over a grid of layer sizes and plot a heat map."""
        xtr, xte, ytr, yte = args
        # Recombine the split so cross-validation controls its own folds.
        samples = np.concatenate((xtr, xte))
        tags = np.concatenate((ytr, yte))
        first_params = [170]
        second_params = [126]
        third_params = [2, 3, *range(4, 21, 2)]
        retry_times = 5
        heat_map = []
        for hide_unit_a in first_params:
            for hide_unit_b in second_params:
                for hide_unit_c in third_params:
                    tmp_map = []
                    for _ in range(retry_times):
                        mlp = MLPC(hidden_layer_sizes=(hide_unit_a, hide_unit_b,
                                                       hide_unit_c),
                                   max_iter=2000)
                        tmp_map.append(cross_val_score(
                            mlp, samples, tags,
                            cv=KFold(n_splits=5, shuffle=True)).mean())
                    print(f'first: {hide_unit_a}, second: {hide_unit_b}, third: {hide_unit_c}')
                    heat_map.append([sum(tmp_map) / len(tmp_map)])
        mglearn.tools.heatmap(heat_map, xlabel='second', ylabel='first',
                              xticklabels=('none', ), yticklabels=third_params,
                              cmap='viridis', fmt='%0.4f')
        print(heat_map)
        plot.show()

    def core(self, *args):
        """Dispatch to the currently active experiment."""
        self.core_SVM(*args)

    def core_SVM(self, xtr, xte, ytr, yte):
        """Accumulate 10-fold CV scores of an RBF SVC over a C × gamma grid.

        Scores are summed into ``self.heat_map[C][gamma]`` across calls so
        ``plot_heat()`` can average them over ``self.retry_times`` runs.
        """
        # Scale the recombined data once; cross-validation re-splits it.
        # (cross_val_score clones the estimator, so no pre-fit is needed.)
        samples = StandardScaler().fit_transform(pd.concat([xtr, xte]))
        tags = pd.concat([ytr, yte])
        param_C = [5, 8, 10, 30, 50, 75, 100, 300, 500]
        param_gamma = [0.00005, 0.000075, 0.0001, 0.0003, 0.0005, 0.0008, 0.001]
        # Remember the grid so plot_heat() can label its axes.
        self.param_C = param_C
        self.param_gamma = param_gamma
        if not self.heat_map:
            self.heat_map = {C: {gamma: 0 for gamma in param_gamma}
                             for C in param_C}
        for C in param_C:
            for gamma in param_gamma:
                print(f'C:{C}, gamma:{gamma}')
                svc = SVC(C=C, gamma=gamma)
                self.heat_map[C][gamma] += cross_val_score(
                    svc, samples, tags,
                    cv=KFold(n_splits=10, shuffle=True)).mean()

    def plot_heat(self):
        """Render the accumulated SVC scores, averaged over the retries."""
        heat_map = [[self.heat_map[C][gamma] / self.retry_times
                     for gamma in self.heat_map[C]]
                    for C in self.heat_map]
        mglearn.tools.heatmap(heat_map, xlabel='gamma', ylabel='C',
                              xticklabels=self.param_gamma,
                              yticklabels=self.param_C,
                              cmap='viridis', fmt='%0.4f')
        plot.show()
    
def main():
    """Script entry point: build the experiment driver and run it."""
    sk = SK()
    sk.train()


if __name__ == '__main__':
    main()