import numpy as np
import pandas as pd
from sklearn import preprocessing


# Load the raw Titanic data and split off the feature matrices.
titanic = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')

# Column slices presumably drop the id/label columns — verify against the
# train.csv / test.csv schemas.
X_train = titanic.iloc[:, 2:]
X_test = test_data.iloc[:, 1:]

# Explore missingness on a copy so the original frame stays untouched.
data1 = titanic.copy()
print(data1.isnull().mean())

# How much data would survive listwise deletion of missing values?
complete_rows = data1.dropna().shape[0]
total_rows = data1.shape[0]
print('total passengers with values in all variables: ', complete_rows)
print('total passengers in the Titanic: ', total_rows)
print('percentage of data without missing values: ', complete_rows / float(total_rows))

# Flag which rows were ORIGINALLY missing Age before imputing anything.
# (Original bug: the indicator was built after fillna, so it was all zeros.)
X_train['Age_NA'] = np.where(X_train['Age'].isnull(), 1, 0)
X_test['Age_NA'] = np.where(X_test['Age'].isnull(), 1, 0)

# Impute missing Age with the TRAIN median in both sets — using the train
# statistic on the test set avoids test-set leakage. Plain assignment
# replaces the chained `inplace=True` fillna, which pandas warns about on
# sliced frames. (The original also imputed twice; once is enough.)
median = X_train.Age.median()
X_train['Age'] = X_train['Age'].fillna(median)
X_test['Age'] = X_test['Age'].fillna(median)

print(X_train['Age'].isnull().sum())
print(X_train.head())

print(X_train.Age.mean(), X_train.Age.median())

print(X_train.head(10))

# --- One-hot encoding of the categorical 'Sex' column ---
one_hot = preprocessing.OneHotEncoder()
sex_as_column = X_train['Sex'].values.reshape(-1, 1)
print(one_hot.fit_transform(sex_as_column))

# Same idea via pandas: dummy columns, then shown side by side with the raw
# values for comparison.
sex_dummies = pd.get_dummies(X_train['Sex'])
print(sex_dummies.head())
print(pd.concat([X_train['Sex'], sex_dummies], axis=1).head())

# --- Ordinal (integer) encoding ---
le = preprocessing.LabelEncoder()
print(le.fit_transform(X_train['Sex']))

# --- Numerical feature scaling ---
# NOTE(review): `ss` is instantiated but never applied below — presumably a
# leftover from experimenting; confirm before removing.
ss = preprocessing.StandardScaler()
mm = preprocessing.MinMaxScaler()

fare_as_column = X_train['Fare'].values.reshape(-1, 1)
print(mm.fit_transform(fare_as_column))

# Discretize Fare into six 100-wide buckets labelled 0-5.
bins = [i * 100 for i in range(7)]  # edges: 0, 100, ..., 600

# include_lowest=True so a Fare of exactly 0 lands in the first bin.
# pd.cut intervals are right-closed/left-open by default, which silently
# turned zero fares into NaN in the original.
cats = pd.cut(X_train['Fare'], bins, labels=[0, 1, 2, 3, 4, 5],
              include_lowest=True)
print(cats.values)
