import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.impute import SimpleImputer

# Load the training and test sets, indexing rows by the 'Id' column
X_full = pd.read_csv('../../data/housing_train.csv', index_col='Id')
X_test_full = pd.read_csv('../../data/housing_test.csv', index_col='Id')

# Drop rows whose target is missing, then split the target off the predictors
print(X_full.shape)
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
# pop returns the 'SalePrice' column and removes it from X_full in one step
y = X_full.pop('SalePrice')

# To keep things simple, we'll use only numerical predictors
X = X_full.select_dtypes(exclude=['object'])
X_test = X_test_full.select_dtypes(exclude=['object'])
print(X.shape)
print(X_test.shape)

# Split off a validation set (80/20) with a fixed seed for reproducibility
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
print(X_train.head())

# Shape of training data (num_rows, num_columns)
print(X_train.shape)

# Number of missing values in each column of training data
missing_val_count_by_column = (X_train.isnull().sum())
print(missing_val_count_by_column[missing_val_count_by_column > 0])

# How many rows are in the training data?
# Computed from the data instead of hard-coded (was 1168) so the script
# stays correct if the dataset or the split changes.
num_rows = X_train.shape[0]
# How many columns in the training data have missing values? (was 3)
num_cols_with_missing = int((missing_val_count_by_column > 0).sum())
# How many missing entries are contained in all the training data? (was 276)
tot_missing = int(missing_val_count_by_column.sum())


def score_dataset(X_train, X_valid, y_train, y_valid):
    """Fit a 100-tree random forest on the training split and return
    its mean absolute error on the validation split."""
    forest = RandomForestRegressor(n_estimators=100, random_state=0)
    forest.fit(X_train, y_train)
    valid_preds = forest.predict(X_valid)
    return mean_absolute_error(y_valid, valid_preds)


# --- First method: Drop columns with missing values ---
# Names of columns with at least one missing value, determined on the
# training split only and then applied to both splits.
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
print(cols_with_missing)
# DataFrame.drop already returns a new frame, so the extra .copy() the
# original code made was redundant.
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
print("MAE (Drop columns with missing values)")
print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid))

# --- Second method : imputation missing values with AVERAGE strategy ---
imputer = SimpleImputer()
# fit_transform/transform return plain numpy arrays, which strip both the
# column names and the row index. Rebuild the DataFrames with the original
# columns AND index (the original code reset the index, losing row
# alignment with y_train / y_valid). The .copy() calls were redundant:
# the imputer does not mutate its input.
imputed_X_train = pd.DataFrame(imputer.fit_transform(X_train), columns=X_train.columns, index=X_train.index)
imputed_X_valid = pd.DataFrame(imputer.transform(X_valid), columns=X_valid.columns, index=X_valid.index)
print("MAE (Imputed with average strategy)")
print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid))

# --- Third method : imputation missing values with MEDIAN strategy ---
# The SimpleImputer's default (mean) strategy was not better than simply
# dropping the columns, so try the MEDIAN strategy instead.
final_imputer = SimpleImputer(strategy='median')
# Rebuild the DataFrames with the original columns AND index in one step;
# the original code reset the index and needed the redundant .copy()
# and manual column reassignment.
final_X_train = pd.DataFrame(final_imputer.fit_transform(X_train), columns=X_train.columns, index=X_train.index)
final_X_valid = pd.DataFrame(final_imputer.transform(X_valid), columns=X_valid.columns, index=X_valid.index)
print("MAE (Imputed with median strategy)")
print(score_dataset(final_X_train, final_X_valid, y_train, y_valid))


