# -*- coding: utf-8 -*-
# ----------------------------
# @Time    : 2021/9/23 上午11:15
# @Author  : acedar
# @FileName: lr_classifer.py
# ----------------------------

# **** Evaluation metrics:
# ********** areaUnderROC
# ********** areaUnderPR
# ***********

from pyspark.sql import SparkSession
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator

# Start a local Spark session and read the libsvm-formatted sample dataset.
spark = SparkSession.builder.appName("test").getOrCreate()

data_path = "../datasets/mllib/sample_libsvm_data.txt"
data_df = spark.read.load(data_path, format="libsvm")

# Hold out roughly 20% of the rows for testing (random 80/20 split).
train_df, test_df = data_df.randomSplit([0.8, 0.2])

# Train an elastic-net regularized logistic regression classifier.
lr = LogisticRegression().setMaxIter(100).setRegParam(0.3).setElasticNetParam(0.8)
lrm = lr.fit(train_df)

# Training-set metrics from the model summary.
# BUG FIX: the original labels (mse/rmse/mae/r2) were regression metric names
# that did not match the classification values actually being printed.
trs = lrm.summary
print(f"train accuracy: {trs.accuracy}")
print(f"train weighted precision: {trs.weightedPrecision}")
print(f"train weighted recall: {trs.weightedRecall}")
# BUG FIX: weightedFMeasure is a method (beta defaults to 1.0); the original
# `trs.weightedFMeasure` printed a bound-method repr instead of the F1 score.
print(f"train weighted f1: {trs.weightedFMeasure()}")

# Score the held-out test split with the fitted model.
pred_df = lrm.transform(test_df)

# Standard binary-classification evaluation: area under the ROC curve,
# computed from the raw (pre-threshold) prediction column.
evaluator = BinaryClassificationEvaluator(
    labelCol="label",
    rawPredictionCol="rawPrediction",
    metricName="areaUnderROC",
)

auc = evaluator.evaluate(pred_df)
print(f"test auc: {auc}")

# Multiclass evaluation (also applicable to binary classification).
mul_evaluator = MulticlassClassificationEvaluator().setLabelCol("label")\
    .setPredictionCol("prediction").setMetricName("weightedPrecision")
# BUG FIX: the original called evaluator.evaluate (the binary AUC evaluator),
# so the value printed as "weighted precision" was actually the AUC and
# mul_evaluator was never used. Evaluate with the multiclass evaluator.
precision = mul_evaluator.evaluate(pred_df)
print(f"test weighted precision: {precision}")


