from __future__ import print_function
from pyspark.ml.classification import LogisticRegression
from pyspark.sql import SparkSession

if __name__ == '__main__':
    # Paths to LIBSVM-formatted train/test data, relative to the working dir.
    # NOTE(review): libsvm_file_name is only consumed by the commented-out
    # training example below; it is kept so that example remains runnable
    # when uncommented.
    libsvm_file_name = "../resources/data_format1/data_format2/train_format1.txt"
    test_file_name = "../resources/data_format1/data_format2/test_format1.txt"

    # Create (or reuse) a local Spark session for this job.
    spark = SparkSession \
        .builder \
        .appName("LogisticRegression") \
        .getOrCreate()

    # Load the test set as a DataFrame (libsvm source yields
    # "label"/"features" columns) and print its inferred schema.
    test = spark.read.format("libsvm").load(test_file_name)
    test.printSchema()

    # --- Disabled example: fit a LogisticRegression model on the train set,
    # --- inspect the training summary, and pick the F-measure-maximizing
    # --- threshold. Left commented out intentionally; uncomment to run.
    # training = spark.read.format("libsvm").load(libsvm_file_name)
    # lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
    # lrModel = lr.fit(training)
    # trainingSummary = lrModel.summary

    # # Obtain the objective per iteration
    # objectiveHistory = trainingSummary.objectiveHistory
    # print("objectiveHistory:")
    # for objective in objectiveHistory:
    #     print(objective)

    # # Obtain the receiver-operating characteristic as a dataframe and areaUnderROC.
    # trainingSummary.roc.show()
    # print("areaUnderROC: " + str(trainingSummary.areaUnderROC))

    # # Set the model threshold to maximize F-Measure
    # fMeasure = trainingSummary.fMeasureByThreshold
    # maxFMeasure = fMeasure.groupBy().max('F-Measure').select('max(F-Measure)').head()
    # bestThreshold = fMeasure.where(fMeasure['F-Measure'] == maxFMeasure['max(F-Measure)']) \
    #     .select('threshold').head()['threshold']
    # lr.setThreshold(bestThreshold)
    # $example off$

    # Release the Spark session's resources.
    spark.stop()