from pyspark import SparkContext, RDD
import os
import numpy as np
from pyspark.mllib.evaluation import RegressionMetrics
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD

# Make PySpark workers use Python 3 (must match the driver's Python version).
os.environ['PYSPARK_PYTHON'] = "python3"

sc = SparkContext.getOrCreate()
# Load the hourly bike-sharing dataset and split each CSV line into fields.
# NOTE(review): textFile does not skip a CSV header row — if hour.csv starts
# with a header, those strings leak into the mappings below; verify the file.
rdd = sc.textFile("file:///Users/sonto/Workspace/P1905/spark_example/回归模型/hour.csv").map(lambda line: line.split(","))


def remove_columns(data):
    """Return fields 2..13 of a row plus its last field (the target count)."""
    kept = data[2:14]
    kept.append(data[-1])
    return kept


# Drop the columns that are not used as features (see remove_columns).
rdd = rdd.map(remove_columns)


# Build a categorical value -> index mapping for one column.
def get_mapping(rdd, i):
    """Return a dict mapping each distinct value in column ``i`` of ``rdd``
    to a unique integer index (used later for one-hot encoding).

    Raises:
        TypeError: if ``rdd`` is not a pyspark RDD.
    """
    # Validate explicitly instead of with ``assert``, which is stripped
    # when Python runs with -O.
    if not isinstance(rdd, RDD):
        raise TypeError("rdd must be a pyspark RDD, got %r" % type(rdd))
    return rdd.map(lambda data: data[i]).distinct().zipWithIndex().collectAsMap()


# Precompute one-hot index mappings for the first 8 (categorical) columns
# and broadcast them so every executor shares one read-only copy.
mappings = sc.broadcast([get_mapping(rdd, i) for i in range(8)])


def to_index(i, value):
    """One-hot encode ``value`` using the broadcast mapping for column ``i``."""
    mapping = mappings.value[i]
    encoded = np.zeros(len(mapping))
    hot = mapping[value]
    encoded[hot] = 1
    return encoded


def extract_features(features):
    """Turn a raw row into a flat numeric feature list.

    The first 8 fields are one-hot encoded via ``to_index``; the remaining
    fields (including the trailing target value) are cast to float and
    appended unchanged.
    """
    row = []
    for col in range(8):
        row += list(to_index(col, features[col]))
    row += [float(v) for v in features[8:]]
    return row


# Feature extraction: build LabeledPoint objects with the last element of
# each encoded row as the regression target and the rest as the features.
labeledPoint = rdd.map(extract_features).map(lambda pt: LabeledPoint(pt[-1], pt[0: -1]))

pt = labeledPoint.first()
print(pt.features, pt.label)
# NOTE(review): trains with LinearRegressionWithSGD defaults (step size,
# iterations); SGD can diverge on unscaled data — confirm metrics look sane.
model = LinearRegressionWithSGD.train(labeledPoint)

# Evaluate on the training set itself: pair each prediction with its label.
metrics = RegressionMetrics(labeledPoint.map(lambda pt: (float(model.predict(pt.features)), pt.label)))
print(metrics.meanSquaredError, metrics.meanAbsoluteError)

# print(model.predict(pt.features), pt.label)

# print(mappings)
# print(get_mapping(rdd, 0))

"""
a = ['A', 'B', 'C']
# 
# 1 of K

A => [1, 0, 0]
B => [0, 1, 0]
C => [0, 0, 1]


"""
