from pyspark import SparkContext, RDD
import os
import numpy as np
from pyspark.mllib.evaluation import RegressionMetrics
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
from pyspark.mllib.tree import DecisionTree

# Force Spark workers to run under Python 3 (must be set before jobs are submitted).
os.environ['PYSPARK_PYTHON'] = "python3"

# Reuse an already-running SparkContext if present, otherwise create a new one.
sc = SparkContext.getOrCreate()
# Load the bike-sharing "hour.csv" dataset and split each line into string fields.
# NOTE(review): no header-row filtering here — if the CSV ships with a header
# line, the float() conversion further down will raise; confirm the file is
# header-free before running.
rdd = sc.textFile("file:///Users/sonto/Workspace/P1905/spark_example/回归模型/hour.csv").map(lambda line: line.split(","))


def remove_columns(data):
    """Select the predictive fields of a parsed CSV row.

    Keeps columns 2 through 13 plus the final column (the target),
    dropping everything else. Returns a new list.
    """
    selected = list(data[2:14])
    selected.append(data[-1])
    return selected


# Drop non-predictive columns and cast every remaining field to float.
rdd = rdd.map(remove_columns).map(lambda pt: [float(p) for p in pt])

# Build LabeledPoint objects: the last field is the regression target,
# all preceding fields are the feature vector.
labeledPointRDD = rdd.map(lambda pt: LabeledPoint(pt[-1], pt[0:-1]))

# Train a decision-tree regressor. An empty categoricalFeaturesInfo means
# every feature is treated as continuous.
model = DecisionTree.trainRegressor(labeledPointRDD, categoricalFeaturesInfo={})

# Evaluate on the training set itself — there is no train/test split here,
# so the metrics below measure training error only.
predicted_values = model.predict(labeledPointRDD.map(lambda pt:pt.features))
actual = labeledPointRDD.map(lambda pt: pt.label)

# Pair each prediction with its true label. zip() relies on both RDDs having
# the same partitioning and per-partition element counts — which holds here
# because both are narrow maps derived from labeledPointRDD with no shuffle.
preds_actual = predicted_values.zip(actual)
print(preds_actual.take(5))

metrics = RegressionMetrics(preds_actual)

# Report mean absolute error and mean squared error.
print(metrics.meanAbsoluteError)
print(metrics.meanSquaredError)
