from os import path
import copy
import pprint as p

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import StandardScaler, PolynomialFeatures as Poly
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import LogisticRegression as LogReg
from sklearn.ensemble import RandomForestClassifier as RFC, GradientBoostingClassifier as GBC
from sklearn.neural_network import MLPClassifier as MLPC

# Absolute paths to the competition CSVs; the relative paths are resolved
# against the current working directory, so the script must be launched from
# the repository root.
TRAIN_DATA_PATH = path.realpath('kaggle/spaceship_titanic/data/train.csv')
TEST_DATA_PATH = path.realpath('kaggle/spaceship_titanic/data/test.csv')

class Model():
    """End-to-end pipeline for the Kaggle Spaceship Titanic competition.

    Loads the train/test CSVs, heuristically imputes a handful of missing
    values, splits the 'Cabin' column, one-hot encodes the categorical
    columns (plus degree-3 interaction features), and trains/evaluates one
    of several scikit-learn classifiers selected by ``name``.
    """

    def __init__(self, name):
        """Load the data, impute missing values, and build the train split.

        Args:
            name: selects the classifier used by ``train``/``predict_and_submit``;
                one of 'linear_svc', 'linear_log', 'tree_gbc', 'mlp', 'core',
                'tree_rfc'.
        """
        self.train_data = pd.read_csv(TRAIN_DATA_PATH)
        self.test_data = pd.read_csv(TEST_DATA_PATH)
        self.name = name
        # Columns treated as categorical and one-hot encoded before training.
        self.dummy_columns = ['HomePlanet', 'CryoSleep', 'Destination', 'Age', 'VIP', 'Cabin_deck', 'Cabin_side', 'Cabin_num']
        # Full modeling feature set: raw spending columns + categorical ones.
        self.data_keys = ['FoodCourt', 'Spa', 'RoomService', 'ShoppingMall', 'VRDeck', *self.dummy_columns]

        # Split 'Cabin' ("deck/num/side") into three separate columns.
        print('begin deal Cabin')
        self.train_data.loc[:,['Cabin_deck', 'Cabin_num', 'Cabin_side']] = 0
        self.test_data.loc[:,['Cabin_deck', 'Cabin_num', 'Cabin_side']] = 0
        for data in (self.train_data, self.test_data):
            for index, rst in zip(range(len(data)), data['Cabin'].isna()):
                if not rst:
                    split_rst = data.loc[index, 'Cabin'].split('/')
                    split_rst = [i.strip() for i in split_rst]
                    data.loc[index, ['Cabin_deck', 'Cabin_num', 'Cabin_side']] = split_rst
            data.drop(['Cabin'], axis=1, inplace=True)

        # Heuristic imputation of missing values, applied to both frames.
        print('begin deal empty')
        sleep_num, vip_num, home_num = 0, 0, 0
        for data in (self.train_data, self.test_data):
            for index in range(len(data)):
                # Missing spending amounts default to zero.
                for col_name in ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']:
                    if np.isnan(data[col_name][index]):
                        data.loc[index, col_name] = 0

                # CryoSleep: a passenger whose spendings are all known and all
                # zero is assumed to be in cryo sleep.
                # '''
                if np.isnan(data['CryoSleep'][index]):
                    if not any(data.iloc[index].isna()[['FoodCourt', 'Spa', 'RoomService', 'ShoppingMall', 'VRDeck']]):
                        if sum(data.iloc[index][['FoodCourt', 'Spa', 'RoomService', 'ShoppingMall', 'VRDeck']]) == 0:
                            data.loc[index, 'CryoSleep'] = True
                            sleep_num += 1
                # '''

                # VIP: passengers from Earth are assumed not to be VIPs.
                if data.iloc[index].isna()['VIP'] and data.loc[index, 'HomePlanet'] == 'Earth':
                    data.loc[index, 'VIP'] = False
                    vip_num += 1

                # HomePlanet: infer from cabin deck (A/B/C -> Europa, G -> Earth).
                if data.iloc[index].isna()['HomePlanet']:
                    if data.iloc[index]['Cabin_deck'] in ('A', 'B', 'C'):
                        data.loc[index, 'HomePlanet'] = 'Europa'
                        home_num += 1
                    elif data.iloc[index]['Cabin_deck'] in ['G']:
                        data.loc[index, 'HomePlanet'] = 'Earth'
                        home_num += 1
        print(f'home:{home_num}, sleep:{sleep_num}, vip:{vip_num}')

        # Degree-3 interaction features over the one-hot categorical columns
        # (Age/Cabin_num excluded as quasi-numeric). Computed over the
        # concatenated train+test frame so the dummy columns line up.
        # '''
        poly_keys = self.dummy_columns[:]
        for col in ('Age', 'Cabin_num'):
            if col in poly_keys:
                poly_keys.remove(col)
        poly_dummies = pd.get_dummies(self.train_data._append(self.test_data, ignore_index=True)[poly_keys], columns=poly_keys)
        poly_cols = pd.DataFrame(Poly(degree=3, interaction_only=True).fit(poly_dummies).transform(poly_dummies))
        poly_cols.columns = poly_cols.columns.astype(str)
        split_index = len(self.train_data)
        # drop=True keeps reset_index() from leaking an extraneous 'index'
        # column into the feature frames while still realigning row labels
        # for the axis=1 concat.
        self.train_data = pd.concat([self.train_data, poly_cols.iloc[:split_index].reset_index(drop=True)], axis=1)
        self.test_data = pd.concat([self.test_data, poly_cols.iloc[split_index:].reset_index(drop=True)], axis=1)
        # '''
        # Disabled experiment: interaction features from the train set only.
        '''
        poly_keys = self.dummy_columns[:]
        poly_keys.remove('Age')
        poly_dummies = pd.get_dummies(self.train_data[poly_keys], columns=poly_keys)
        poly_cols = pd.DataFrame(Poly(degree=3, interaction_only=True).fit(poly_dummies).transform(poly_dummies))
        self.train_data = pd.concat([self.train_data, poly_cols], axis=1)
        '''

        # One-hot encode the modeling columns and hold out a validation split.
        return_data = pd.get_dummies(self.train_data[self.data_keys], columns=self.dummy_columns)
        self.train_data_split = train_test_split(return_data, self.train_data['Transported'])
        # scaler = StandardScaler()
        # num_keys = list(set(self.data_keys) - set(self.dummy_columns))
        # self.train_data_split[0].loc[:, num_keys] = scaler.fit(self.train_data_split[0][num_keys]).transform(self.train_data_split[0][num_keys])
        # self.train_data_split[1].loc[:, num_keys] = scaler.transform(self.train_data_split[1][num_keys])
        # Disabled experiment: impute remaining empty categorical values with
        # a LinearSVC trained on the rows where the value is present.
        '''
        model = LinearSVC(C=0.1, dual=False)
        fill_columns = ['HomePlanet', 'Destination', 'Age']
        scaler = StandardScaler()
        for column in fill_columns:
            print(f'begin deal with col {column}')
            # print(f'{column}:{self.train_data[column].count()}')
            data_keys, dummy_columns = self.data_keys[:], self.dummy_columns[:]
            data_keys.remove(column)
            dummy_columns.remove(column)
            numeric_keys = list(set(data_keys) - set(dummy_columns))
            filled, to_fill = pd.DataFrame(), pd.DataFrame()
            for data in (self.train_data, self.test_data):
                for index in range(len(data)):
                    if data.iloc[index].isna()[column]:
                        to_fill = to_fill._append(data.iloc[index])
                    else:
                        filled = filled._append(data.iloc[index])
            dummies = pd.get_dummies(filled._append(to_fill, ignore_index=True)[self.data_keys], columns=dummy_columns)
            split_index = len(filled)
            scaler.fit(dummies.iloc[:split_index][numeric_keys])
            dummies.loc[:, numeric_keys] = scaler.transform(dummies.loc[:, numeric_keys])
            train_keys = list(dummies.keys())
            train_keys.remove(column)
            if column in ['VIP', 'Age']:
                model.fit(dummies.iloc[:split_index][train_keys], dummies.iloc[:split_index][column].astype(int))
            else:
                model.fit(dummies.iloc[:split_index][train_keys], dummies.iloc[:split_index][column])
            predictions = model.predict(dummies.iloc[split_index:][train_keys])
            rst_index = 0
            for data in (self.train_data, self.test_data):
                for index in range(len(data)):
                    if data.iloc[index].isna()[column]:
                        data.loc[index, column] = predictions[rst_index]
                        rst_index += 1
                        if column == 'VIP':
                            print(f'show VIP:{data.loc[index].to_dict()}')
            # print(f'{column}:{self.train_data[column].count()}')
        '''

    def exec(self):
        """Run the trainer selected by ``self.name`` on the held-out split."""
        self.train(*self.train_data_split)

    def train(self, *args):
        """Dispatch (xtr, xte, ytr, yte) to the trainer named by ``self.name``.

        Raises:
            ValueError: if ``self.name`` matches no known trainer.
        """
        trainers = {
            'linear_svc': self.linear_svc,
            'linear_log': self.linear_log,
            'tree_gbc': self.tree_gbc,
            'mlp': self.mlp,
            'core': self.core,
            'tree_rfc': self.tree_rfc,
        }
        try:
            trainer = trainers[self.name]
        except KeyError:
            raise ValueError(f'name {self.name} not match') from None
        trainer(*args)

    def predict_and_submit(self):
        """Refit the selected model on the full training set and write the
        Kaggle submission CSV.

        Only 'linear_svc', 'tree_gbc', 'core' and 'tree_rfc' are supported
        here; any other name raises ValueError (previously the unbound
        ``model`` crashed with UnboundLocalError).
        """
        # Encode train+test together so the dummy columns line up across both.
        print('predict begin')
        dummies = pd.get_dummies(self.train_data._append(self.test_data, ignore_index=True)[self.data_keys], columns=self.dummy_columns)
        targets = self.train_data['Transported']
        split_index = len(self.train_data)
        # scaler = StandardScaler().fit(dummies[:split_index])
        # tr_scaled = scaler.transform(dummies[:split_index])
        # scaler = StandardScaler()
        # num_keys = list(set(self.data_keys) - set(self.dummy_columns))
        # dummies.loc[:split_index,num_keys] = scaler.fit(dummies.loc[:split_index,num_keys]).transform(dummies.loc[:split_index,num_keys])
        # dummies.loc[split_index:,num_keys] = scaler.transform(dummies.loc[split_index:,num_keys])

        '''
        poly_keys = self.dummy_columns[:]
        poly_keys.remove('Age')
        poly_dummies = pd.get_dummies(self.train_data._append(self.test_data)[poly_keys], columns=poly_keys)
        poly_cols = pd.DataFrame(Poly(degree=3, interaction_only=True).fit(poly_dummies).transform(poly_dummies))
        poly_cols.columns = poly_cols.columns.astype(str)
        dummies = pd.concat([dummies, poly_cols], axis=1)
        '''

        # Quick holdout fit first, just to print train/test accuracy.
        xtr, xte, ytr, yte = train_test_split(dummies[:split_index], self.train_data['Transported'])
        if self.name == 'linear_svc':
            # model = LinearSVC(penalty='l2', max_iter=100000).fit(xtr, ytr)
            # model = LinearSVC(C=0.03, penalty='l2', dual=True).fit(xtr, ytr)
            model = LinearSVC(C=0.2, penalty='l1', dual=False).fit(xtr, ytr)
        elif self.name == 'tree_gbc':
            model = GBC(learning_rate=0.1, max_depth=5).fit(xtr, ytr)
        elif self.name == 'core':
            # model = SVC(gamma=0.05, C=3).fit(xtr, ytr)
            # model = SVC(gamma=0.07).fit(xtr, ytr)
            model = SVC(C=2, gamma=0.09).fit(xtr, ytr) # degree 3
        elif self.name == 'tree_rfc':
            model = RFC(max_leaf_nodes=190, n_estimators=50).fit(xtr, ytr)
        else:
            # Fail loudly instead of hitting an UnboundLocalError below.
            raise ValueError(f'name {self.name} not match')
        print(f'{self.name} train:{model.score(xtr, ytr):.4f}, test:{model.score(xte, yte):.4f}')
        # Refit on the full training set before predicting the test rows.
        model.fit(dummies[:split_index], targets)
        predictions = model.predict(dummies[split_index:])
        frame = pd.DataFrame({'PassengerId':self.test_data['PassengerId'], 'Transported':predictions})
        frame.to_csv('kaggle/spaceship_titanic/data/submission.csv', index=False)

    def linear_svc(self, xtr, xte, ytr, yte):
        """Grid over C for a LinearSVC and print train/test accuracy."""
        '''
        scaler = StandardScaler()
        num_keys = list(set(self.data_keys) - set(self.dummy_columns))
        xtr.loc[:,num_keys] = scaler.fit(xtr[num_keys]).transform(xtr[num_keys])
        xte.loc[:,num_keys] = scaler.transform(xte[num_keys])
        '''
        # xtr = scaler.transform(xtr)
        # xte = scaler.transform(xte)
        # param_C = [0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100]
        # param_C = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]
        param_C = [0.1, 0.2, 0.3, 0.4, 0.5]
        # param_C = [1]
        # param_C = [0.01, 0.1, 1, 10, 100]
        param_penalty = ['l1']
        scores = []
        cross_scores = []
        for penalty in param_penalty:
            for C in param_C:
                print(f'C:{C}')
                # Single holdout evaluation (cross-validation variant disabled).
                svc = LinearSVC(C=C, penalty=penalty, max_iter=50000, dual=False).fit(xtr, ytr)
                scores.append(f'{penalty}, C:{C}, train:{svc.score(xtr, ytr):.4f}, test:{svc.score(xte, yte):.4f}')

                # cross
                # svc = LinearSVC(C=C, penalty=penalty, dual=False)
                # cross_scores.append(f'{penalty}, C:{C}, cross score:{cross_val_score(svc, xtr._append(xte), ytr._append(yte), cv=KFold(n_splits=10, shuffle=True)).mean():.4f}')
        p.pprint({'linear svc cross': cross_scores, 'normal': scores})

    def linear_log(self, xtr, xte, ytr, yte):
        """Grid over C for logistic regression on scaled numeric features."""
        param_C = [0.5, 0.75, 1, 3, 5]
        # param_C = [1]
        # param_C = [i/10 for i in range(1,6)]
        scores = []
        scores_cross = []

        # Standardize only the numeric (non-dummy) columns, in place.
        scaler = StandardScaler()
        # xtr_scaled = scaler.transform(xtr)
        # xte_scaled = scaler.transform(xte)
        num_keys = list(set(self.data_keys) - set(self.dummy_columns))
        xtr.loc[:,num_keys] = scaler.fit(xtr[num_keys]).transform(xtr[num_keys])
        xte.loc[:,num_keys] = scaler.transform(xte[num_keys])
        for C in param_C:
            print(f'C:{C}')
            model = LogReg(C=C, max_iter=10000).fit(xtr, ytr)
            scores.append(f'log C:{C},train:{model.score(xtr, ytr):.4f},test:{model.score(xte, yte):.4f}')
            # log = LogReg(C=C, penalty='l1', max_iter=10000)
            # scores_cross.append(cross_val_score(log, xtr._append(xte), ytr._append(yte),cv=KFold(n_splits=5, shuffle=True)).mean())
        p.pprint(scores)
        # p.pprint({'logreg': scores_cross})

    def tree_rfc(self, xtr, xte, ytr, yte):
        """Grid over max_leaf_nodes for a random forest."""
        param_est = [50]
        # param_est = [3, 5, 7, 9, 12, 15, 20]
        # param_est = [10, 30, 100]
        param_max_depth = [2, 3, 4]
        param_max_leaf = [140, 150, 160, 170, 180, 190, 200]
        scores = []
        for est in param_est:
            # for max_depth in param_max_depth:
            for max_leaf in param_max_leaf:
                # print(f'est:{est}, depth:{max_depth}')
                print(f'rfc est:{est}, leaf:{max_leaf}')
                # model = RFC(max_depth=max_depth, n_estimators=est).fit(xtr, ytr)
                model = RFC(max_leaf_nodes=max_leaf, n_estimators=est, n_jobs=2).fit(xtr, ytr)
                # scores.append(f'est:{est}, depth:{max_depth}, train:{model.score(xtr, ytr)}, test:{model.score(xte, yte)}')
                scores.append(f'est:{est}, leaf:{max_leaf}, train:{model.score(xtr, ytr)}, test:{model.score(xte, yte)}')
        p.pprint(scores)

    def tree_gbc(self, xtr, xte, ytr, yte):
        """Grid over learning rate / depth for gradient boosting."""
        # param_rate = [0.01, 0.05, 0.1, 0.5, 1]
        param_rate = [0.1]
        param_max_depth = [5]
        # param_max_leaf = [10, 20, 30, 40, 50, 70, 100, 150]
        # param_max_leaf = [4, 7, 10, 15, 20]
        param_est = [100]
        scores = []
        for rate in param_rate:
            for depth in param_max_depth:
            # for leaf in param_max_leaf:
            # for est in param_est:
                print(f'rate:{rate},depth:{depth}')
                # print(f'rate:{rate},leaf:{leaf}')
                # print(f'rate:{rate},est:{est}')
                # model = GBC(max_leaf_nodes=leaf, learning_rate=rate).fit(xtr, ytr)
                # model = GBC(n_estimators=est, learning_rate=0.05, max_depth=5).fit(xtr, ytr)
                # Use the loop variable (was hard-coded to 5 while iterating [5]).
                model = GBC(max_depth=depth, learning_rate=rate).fit(xtr, ytr)
                # scores.append(f'rate:{rate},depth:{depth},train:{model.score(xtr, ytr):.4f},test:{model.score(xte, yte):.4f}')
                # scores.append(f'rate:{rate},leaf:{leaf},train:{model.score(xtr, ytr):.4f},test:{model.score(xte, yte):.4f}')
                # scores.append(f'default rate,est:{est},train:{model.score(xtr, ytr):.4f},test:{model.score(xte, yte):.4f}')
                scores.append(f'rate:{rate}, train:{model.score(xtr, ytr):.4f},test:{model.score(xte, yte):.4f}')
        p.pprint({'gbc':scores})

    def mlp(self, xtr, xte, ytr, yte):
        """Grid over hidden-layer width for a single-layer MLP."""
        # Standardize only the numeric (non-dummy) columns, in place.
        scaler = StandardScaler()
        num_keys = list(set(self.data_keys) - set(self.dummy_columns))
        xtr.loc[:,num_keys] = scaler.fit(xtr[num_keys]).transform(xtr[num_keys])
        xte.loc[:,num_keys] = scaler.transform(xte[num_keys])

        param_first = [10, 20, 33, 46, 60, 80, 100, 120, 150, 180, 220, 260, 300]
        scores = []
        for first in param_first:
            print(f'mlp: {first}')
            mlp = MLPC(hidden_layer_sizes=(first, ),max_iter=1000).fit(xtr, ytr)
            scores.append(f'mlp {first},train:{mlp.score(xtr, ytr):.4f},test:{mlp.score(xte, yte):.4f}')
        p.pprint(scores)

    def core(self, xtr, xte, ytr, yte):
        """Grid over gamma for an RBF-kernel SVC on scaled numeric features."""
        # Standardize only the numeric (non-dummy) columns, in place.
        scaler = StandardScaler()
        num_keys = list(set(self.data_keys) - set(self.dummy_columns))
        xtr.loc[:,num_keys] = scaler.fit(xtr[num_keys]).transform(xtr[num_keys])
        xte.loc[:,num_keys] = scaler.transform(xte[num_keys])

        # param_C = [0.4, 0.7, 1, 1.8, 2.8]
        # Earlier grids, superseded by the range below:
        # param_gamma = [0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]
        # param_gamma = [0.07, 0.08, 0.09, 0.1, 0.11]
        param_gamma = [i/100 for i in range(6, 11)]
        # param_gamma = [0.09]
        scores = []

        print('core begin')
        # for C in param_C:
        for gamma in param_gamma:
            print(f'gamma:{gamma}')
            # print(f'C:{C}')
            model = SVC(C=1.5, gamma=gamma, cache_size=500).fit(xtr, ytr)
            # model = SVC(C=C, gamma=0.09, cache_size=500).fit(xtr, ytr)
            scores.append(f'gamma:{gamma}, core train:{model.score(xtr, ytr)}, test:{model.score(xte, yte)}')
            # scores.append(f'C:{C}, core train:{model.score(xtr, ytr)}, test:{model.score(xte, yte)}')
        p.pprint({'core':scores})
        
def main():
    """Entry point: build the pipeline and run the gradient-boosting trainer."""
    model = Model('tree_gbc')
    model.exec()
    
# Run the pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main()