#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pyspark.sql import SparkSession
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.feature import VectorAssembler, StringIndexer
from pyspark.ml.regression import LinearRegression

if __name__ == '__main__':
    # Train and evaluate an elastic-net linear regression on the Kaggle
    # house-prices dataset using Spark ML, running on all local cores.
    spark = SparkSession.builder.appName("house_price").master("local[*]").getOrCreate()

    # Read the CSVs with schema inference; the header row supplies column names.
    train = spark.read.csv("../data/house-prices/train.csv", inferSchema=True,
                           header=True)
    # NOTE(review): `test` is loaded but never used below — kept to preserve
    # the original I/O; either wire it into a final prediction step or drop it.
    test = spark.read.csv("../data/house-prices/test.csv", inferSchema=True,
                          header=True)
    train.printSchema()

    # StringIndexer transforms a string column into categorical indices.
    # We index only one column here, but the same pattern applies to every
    # string column in the schema.
    indexer = StringIndexer(inputCol="LotShape", outputCol="LotShape2")
    indexed = indexer.fit(train).transform(train)

    # VectorAssembler combines the numeric columns (plus the indexed
    # LotShape2) into the single "features" vector that Spark ML estimators
    # expect as input.
    assembler = VectorAssembler(inputCols=["MSSubClass", "LotArea", "OverallQual", "OverallCond", "BsmtFinSF1",
                                           "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF", "1stFlrSF", "2ndFlrSF",
                                           "LowQualFinSF", "GrLivArea", "BsmtFullBath", "BsmtHalfBath",
                                           "FullBath", "HalfBath", "BedroomAbvGr", "KitchenAbvGr", "TotRmsAbvGrd",
                                           "Fireplaces", "YearBuilt",
                                           "YearRemodAdd", "GarageCars", "GarageArea", "WoodDeckSF", "OpenPorchSF",
                                           "EnclosedPorch", "3SsnPorch", "ScreenPorch", "PoolArea",
                                           "MiscVal", "MoSold", "YrSold", "LotShape2"], outputCol="features")

    # Assemble the feature vector and keep only what the regression needs.
    output = assembler.transform(indexed)
    final = output.select("features", "SalePrice")

    # Split into training and validation sets; a fixed seed makes the split
    # (and therefore all reported metrics) reproducible across runs.
    train_df, valid_df = final.randomSplit([0.7, 0.3], seed=42)
    train_df.describe().show()

    # Elastic-net regularized linear regression (regParam = overall strength,
    # elasticNetParam = L1/L2 mixing ratio).
    lr = LinearRegression(featuresCol='features', labelCol='SalePrice', maxIter=10,
                          regParam=0.8, elasticNetParam=0.1)
    model = lr.fit(train_df)

    # Evaluate the fitted model on the held-out validation set.
    validate = model.evaluate(valid_df)
    # Check how the model performed.
    print(validate.rootMeanSquaredError)
    print(validate.r2)

    lr_predictions = model.transform(valid_df)
    lr_predictions.select("prediction", "SalePrice", "features").show(5)

    # Independent R^2 check via RegressionEvaluator (should agree with
    # validate.r2 above, since both score the same predictions).
    lr_evaluator = RegressionEvaluator(predictionCol="prediction",
                                       labelCol="SalePrice", metricName="r2")
    print("R Squared (R2) on val data = %g" % lr_evaluator.evaluate(lr_predictions))

    spark.stop()
