#!/usr/bin/env python
# coding: utf-8

# In[1]:


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import graphviz
import missingno as msno
from pywaffle import Waffle

from sklearn import preprocessing

from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC


# In[2]:


# Recursively list every file under /input (Kaggle-style data directory);
# prints nothing if the directory does not exist locally.
for root, _subdirs, files in os.walk('/input'):
    for fname in files:
        print(os.path.join(root, fname))


# In[3]:


# Local copies of the Kaggle Titanic train/test CSVs.
train_path = r"D:/Code/Kaggle/1_Titanic/input/train.csv"
test_path = r"D:/Code/Kaggle/1_Titanic/input/test.csv"
train, test = pd.read_csv(train_path), pd.read_csv(test_path)
# First rows; shown as the cell output when run in a notebook.
train.head()


# In[4]:


# Quick audit of the training data: dtypes, missingness, cardinality.
train.info()

print('--------')
print('Percentage of NA per property sorted')
print('--------')
na_pct = train.isna().sum().div(len(train)).mul(100).sort_values(ascending=False)
print(na_pct)
print('--------')
print('Unique values for duplications and other useful info')
print('--------')
uniq_counts = train.nunique().sort_values()
print(uniq_counts)


# In[5]:


# Frequency of each embarkation port (cell output in a notebook).
train.Embarked.value_counts()


# In[6]:


def cleanData(data):
    """Prepare a Titanic dataframe for modelling, in place.

    Drops columns that are mostly missing or near-unique identifiers,
    imputes Age and Fare with the median of the passenger's
    (Pclass, Sex) group, drops rows lacking Embarked, and
    integer-encodes Sex and Embarked.

    NOTE: mutates ``data`` (the cells below rely on ``train`` itself
    being cleaned) and also returns it for convenience.
    """
    # Cabin is mostly NaN; Name/Ticket are near-unique identifiers.
    data.drop(['Cabin', 'Name', 'Ticket'], axis=1, inplace=True)
    # Impute within (Pclass, Sex) groups rather than a global median.
    data['Age'] = data.groupby(['Pclass', 'Sex'])['Age'].transform(lambda x: x.fillna(x.median()))
    data['Fare'] = data.groupby(['Pclass', 'Sex'])['Fare'].transform(lambda x: x.fillna(x.median()))
    data.dropna(axis=0, subset=['Embarked'], inplace=True)
    # Explicit column assignment with .map replaces the deprecated
    # Series.replace(..., inplace=True) chained-assignment pattern;
    # every remaining value is covered by the mapping, so results match.
    # (Also removed an unused LabelEncoder local left over from an
    # earlier encoding approach.)
    data['Sex'] = data['Sex'].map({'male': 0, 'female': 1})
    data['Embarked'] = data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2})
    return data


# In[7]:


# Clean both splits; cleanData works in place, so train/test are mutated too.
clean_train, clean_test = cleanData(train), cleanData(test)


# In[8]:


# Verify the cleaning: dtypes and non-null counts for both splits.
for frame in (clean_train, clean_test):
    frame.info()


# In[9]:


# Target / feature split; `train` was cleaned in place above, so this is the
# cleaned data. get_dummies is effectively a pass-through once all columns
# are numeric.
y = train['Survived']
X = pd.get_dummies(train.drop(columns='Survived'))

# Hold out 20% for validation; fixed seed for reproducibility.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)


# In[10]:


def fitAndPredict(model):
    """Fit *model* on the training split and return its validation accuracy.

    Relies on the module-level X_train / y_train / X_val / y_val created by
    the train_test_split cell above.
    """
    model.fit(X_train, y_train)
    return accuracy_score(y_val, model.predict(X_val))


# In[11]:


# Baseline comparison of several off-the-shelf classifiers on the
# held-out validation split.
model1 = LogisticRegression(solver='liblinear', random_state=42)
model2 = GradientBoostingClassifier()
model3 = RandomForestClassifier()
model4 = SGDClassifier()
model5 = SVC()

models = [model1, model2, model3, model4, model5]
# enumerate(..., start=1) replaces the hand-rolled `i = 0; i += 1` counter;
# printed output is identical.
for i, model in enumerate(models, start=1):
    print("Model", i, ":", model)
    print("ACC: ", fitAndPredict(model))


# In[12]:


# Hand-tuned gradient boosting variant; its validation accuracy is the
# cell output when run as a notebook.
tuned = dict(min_samples_split=20, min_samples_leaf=60, max_depth=3, max_features=7)
model = GradientBoostingClassifier(**tuned)
fitAndPredict(model)


# In[13]:


# Build and save the Kaggle submission file.
# NOTE(review): predictions come from `model2` (the default
# GradientBoostingClassifier fitted in the comparison loop), NOT from the
# hand-tuned `model` fitted in the previous cell — confirm this is intentional.
# NOTE(review): cleanData() drops rows with a missing Embarked; if the test
# set ever contained such rows the submission would be missing passengers —
# verify against the competition's test.csv.
predict = model2.predict(pd.get_dummies(clean_test))

output = pd.DataFrame({'PassengerId': clean_test.PassengerId, 'Survived': predict})
output.to_csv('my_submission.csv', index=False)
print("Submission saved")

