# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd

# Load the raw Titanic datasets for training and evaluation.
data_train = pd.read_csv("data/train.csv")
data_test = pd.read_csv("data/test.csv")

# Peek at the first rows of the training data.
data_train.head()

# Structural overview (dtypes, non-null counts) of both sets.
print(data_train.info())
print('_' * 40)
print(data_test.info())

# Summary statistics: numeric columns first, then object (string) columns.
print('_' * 40)
print(data_train.describe())
print('_' * 40)
print(data_train.describe(include=['O']))

# visualizing tools (results have been displayed in the notebook)
import seaborn as sns
import matplotlib.pyplot as plt
## age feature visualization
#age=sns.FacetGrid(data_train, col='Survived')
#age.map(plt.hist, 'Age', bins=20)
## pclass feature visualization
#pclass = sns.FacetGrid(data_train, col='Survived', row='Pclass', height=2.2, aspect=1.6)
#pclass.map(plt.hist, 'Age', alpha=.5, bins=20)
#pclass.add_legend();# adding explanation for the graph
## embarked feature visualization
#embark = sns.FacetGrid(data_train, col='Embarked', size=2.2, aspect=1.2)
#embark.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
#embark.add_legend()

# Data wrangling: discard features we will not model on.
# 'Ticket' and 'Cabin' are dropped first, then the pure identifiers
# 'Name' and 'PassengerId'.
data_train = data_train.drop(['Ticket', 'Cabin'], axis=1)
data_test = data_test.drop(['Ticket', 'Cabin'], axis=1)
data_train = data_train.drop(['Name', 'PassengerId'], axis=1)
data_test = data_test.drop(['Name', 'PassengerId'], axis=1)

# Encode 'Sex' numerically (female -> 1, male -> 0) in both frames.
combine = [data_train, data_test]  # apply transforms to both datasets at once
for frame in combine:
    frame['Sex'] = frame['Sex'].map({'female': 1, 'male': 0}).astype(int)

# Impute missing ages from the median age of each (sex, pclass) group.
guess_age = np.zeros((2, 3))
for frame in combine:
    # First pass: derive a rounded median age for every gender/class cell.
    for sex in range(2):
        for pcls in range(3):
            known_ages = frame.loc[
                (frame['Sex'] == sex) & (frame['Pclass'] == pcls + 1), 'Age'
            ].dropna()
            median_age = known_ages.median()
            # Round the group median to the nearest half year.
            guess_age[sex, pcls] = int(median_age / 0.5 + 0.5) * 0.5

    # Second pass: fill each null age from its gender/class cell.
    for sex in range(2):
        for pcls in range(3):
            frame.loc[
                frame.Age.isnull() & (frame.Sex == sex) & (frame.Pclass == pcls + 1),
                'Age',
            ] = guess_age[sex, pcls]
    # Ages are treated as whole numbers from here on.
    frame['Age'] = frame['Age'].astype(int)

# Survival rate across 5 equal-width age bands (exploratory check).
print('_' * 40)
data_train['AgeBand'] = pd.cut(data_train['Age'], 5)
print(data_train[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False)
      .mean().sort_values(by='AgeBand', ascending=True))
# Discretize the continuous 'Age' feature into five ordinal groups,
# using the band boundaries observed above.
for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    # BUG FIX: the original selected this slice but never assigned to it,
    # leaving passengers older than 64 with their raw (un-banded) age.
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
# The helper 'AgeBand' column is no longer needed after banding.
data_train = data_train.drop(['AgeBand'], axis=1)
combine = [data_train, data_test]

# Complete the 'Embarked' feature with its most frequent port.
freq_port = data_train.Embarked.dropna().mode()[0]
for frame in combine:
    frame['Embarked'] = frame['Embarked'].fillna(freq_port)

# Survival rate per embarkation port after imputation.
print('_' * 40)
print(
    data_train[['Embarked', 'Survived']]
    .groupby(['Embarked'], as_index=False)
    .mean()
    .sort_values(by='Survived', ascending=False)
)

# Map ports to ordinal codes: S -> 0, C -> 1, Q -> 2.
for frame in combine:
    frame['Embarked'] = frame['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)

# Fill the single missing 'Fare' value in the test set with the median.
# Assign back rather than calling fillna(..., inplace=True) on a column
# slice — that pattern is deprecated chained assignment in modern pandas
# and stops working on copies under copy-on-write.
data_test['Fare'] = data_test['Fare'].fillna(data_test['Fare'].dropna().median())
## The 4 fare-band boundaries used below were derived with:
#data_train['FareBand'] = pd.qcut(data_train['Fare'], 4)
#data_train[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False)\
#.mean().sort_values(by='FareBand', ascending=True)

# Discretize 'Fare' into four ordinal bands (quartile boundaries above).
for dataset in combine:
    dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
#data_train = data_train.drop(['FareBand'], axis=1)
combine = [data_train, data_test]

# Derive an 'IsAlone' indicator from family size (SibSp + Parch + self).
for frame in combine:
    frame['FamilySize'] = frame['SibSp'] + frame['Parch'] + 1
    frame['IsAlone'] = 0
    frame.loc[frame['FamilySize'] == 1, 'IsAlone'] = 1
# The raw family-size columns are superseded by 'IsAlone'.
data_train = data_train.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
data_test = data_test.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [data_train, data_test]

# Show both frames after wrangling to confirm the final feature set.
separator = '_' * 40
print(separator)
print("Dataset After Wrangling")
print('data_train')
print(data_train.head())
print('data_test')
print(data_test.head())
print(separator)

# machine learning algorithms
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import Perceptron
from sklearn.ensemble import RandomForestClassifier

# define the array we use in model training
X_train=data_train.drop("Survived", axis=1)
Y_train=data_train["Survived"]
X_test=data_test.copy()

# Logistic regression baseline; accuracy is measured on the training set.
logregression = LogisticRegression()
logregression.fit(X_train, Y_train)
Y_pred = logregression.predict(X_test)
acc_logreg = round(100 * logregression.score(X_train, Y_train), 2)
print("Logistic Regression: ", acc_logreg)

# k-nearest neighbours classifier with k = 3.
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(100 * knn.score(X_train, Y_train), 2)
print("KNN: ", acc_knn)

# Linear support vector classifier.
linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(100 * linear_svc.score(X_train, Y_train), 2)
print("Support Vector Machine: ", acc_linear_svc)

# Single-layer perceptron classifier.
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_test)
acc_perceptron = round(100 * perceptron.score(X_train, Y_train), 2)
print("Perceptron: ", acc_perceptron)

# Random forest (100 trees); training-set accuracy as a rough fit measure.
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
# Score once and reuse it — the original computed the score a second
# time and discarded the first result.
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
print("Random Forest: ", acc_random_forest)

# Build the Kaggle submission from the random-forest predictions
# (Y_pred still holds the output of the last model fitted above).
# PassengerId was dropped from data_test during wrangling, so re-read it.
submission_test = pd.read_csv("data/test.csv")
submission = pd.DataFrame({
    "PassengerId": submission_test["PassengerId"],
    "Survived": Y_pred,
})
# BUG FIX: write without the DataFrame index — the original emitted a
# spurious extra index column, which breaks the two-column
# (PassengerId, Survived) submission format.
submission.to_csv('submission.csv', index=False)