#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.evaluation import MulticlassMetrics
from pyspark.sql import SparkSession
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
from pyspark.context import SparkContext

if __name__ == '__main__':
    # Initialize the SparkContext for this standalone example job.
    sc = SparkContext(appName="LogisticRegressionExample")
    # Load the iris dataset: CSV lines of 4 numeric features + species name.
    data = sc.textFile("../data/iris.txt")

    # Map species names to 0-based class indices. MLlib multiclass training
    # requires labels in {0, ..., numClasses - 1}; the original 1/2/3 labels
    # are invalid for a 3-class model.
    # NOTE(review): "Virginical" looks like a typo for "Virginica" — confirm
    # against the actual strings in ../data/iris.txt before changing it.
    label_dict = {
        "Setosa": 0,
        "Versicolor": 1,
        "Virginical": 2
    }

    def parse_line(line):
        """Parse one CSV line into a LabeledPoint(label, 4-feature vector)."""
        parts = line.split(',')
        # [0:4] keeps all four iris features; the original [0:3] silently
        # dropped the fourth column. Materialize a list explicitly: under
        # Python 3, passing a lazy `map` object to Vectors.dense breaks.
        features = [float(x) for x in parts[0:4]]
        return LabeledPoint(float(label_dict[parts[4]]), Vectors.dense(features))

    # Parse the raw text into LabeledPoints.
    parsedData = data.map(parse_line)

    # Split into 80% training / 20% test.
    training, test = parsedData.randomSplit([0.8, 0.2])

    # Train a multinomial logistic regression model. numClasses=3 is required:
    # the default (2) cannot fit the 3-class iris problem.
    model = LogisticRegressionWithLBFGS.train(training, numClasses=3)

    # Predict on the test set. MulticlassMetrics expects an RDD of
    # (prediction, label) pairs — the original (label, prediction) order
    # transposes the confusion matrix for asymmetric metrics.
    predictions = test.map(
        lambda point: (float(model.predict(point.features)), point.label))

    # Evaluate overall accuracy. Use the %-operator: the original passed a
    # %-style placeholder to str.format, which printed the literal "%.2f%%".
    evaluator = MulticlassMetrics(predictions)
    print("准确率：%.2f%%" % (evaluator.accuracy * 100.0))

    # Release cluster resources.
    sc.stop()
