import os

import numpy as np
from pyspark import SparkContext, RDD

# Force workers to use python3; must be set before the SparkContext is created.
os.environ["PYSPARK_PYTHON"] = "python3"

sc = SparkContext.getOrCreate()
# Load the bike-sharing hourly dataset (CSV with the header row already removed)
# and split each line into a list of string fields.
rdd = sc.textFile("file:///Users/sonto/Workspace/P1905/spark_example/examples/回归模型/hour-noheader.csv").map(
    lambda line: line.split(","))


def remove_columns(data):
    assert isinstance(data, list)
    new_data = data[2:14] + [data[-1]]
    return new_data

def get_mapping(rdd, index):
    """Build a category -> dense-index mapping for one column of an RDD.

    Collects the distinct values found at position `index` of each record
    and pairs each with a stable integer id (via `zipWithIndex`), suitable
    for one-hot encoding.

    Args:
        rdd: an RDD of list-like records of string fields.
        index: column position to build the mapping for.

    Returns:
        dict mapping each distinct column value to an integer index.
    """
    # NOTE: the previous `assert isinstance(rdd, RDD)` was removed —
    # `assert` is stripped under `python -O` and duck typing suffices.
    return rdd.map(lambda record: record[index]).distinct().zipWithIndex().collectAsMap()

# Reduce every record to the modelling columns only.
rdd = rdd.map(remove_columns)

# One value->index mapping per categorical column (columns 0..6:
# season, yr, mnth, hr, holiday, weekday, workingday).
mappings = [get_mapping(rdd, i) for i in range(0, 7)]

print(mappings)  # debug: inspect the learned category mappings
def map_index(i, v, mapping=None):
    """One-hot encode a single categorical value.

    Args:
        i: index of the categorical column (0..6), used to pick the
            module-level mapping when `mapping` is not given.
        v: the raw (string) category value to encode.
        mapping: optional value->position dict; defaults to the
            module-level `mappings[i]` built from the RDD.

    Returns:
        A numpy float array of length `len(mapping)` that is all zeros
        except for a 1 at the category's position.
    """
    # The per-record debug `print(i, v)` was removed: it ran for every row
    # inside the Spark workers on every `extract_features` call.
    val_map = mappings[i] if mapping is None else mapping
    arr = np.zeros(len(val_map))
    arr[val_map[v]] = 1
    return arr

def extract_features(features):
    """Turn one cleaned record into a fully numeric feature vector.

    The first 7 columns (season..workingday) are one-hot encoded via
    `map_index`; the remaining columns are appended as floats.

    Args:
        features: one record produced by `remove_columns` (string fields).

    Returns:
        A flat list of floats: concatenated one-hot encodings followed by
        the numeric columns.
    """
    encoded = []
    for i in range(0, 7):
        encoded.extend(map_index(i, features[i]))
    # Fix: the trailing fields were previously appended as raw CSV strings,
    # yielding a mixed str/float vector unusable for regression; convert
    # them to floats so the whole vector is numeric.
    # NOTE(review): features[7:] still includes the final `cnt` column
    # (the regression target) — confirm it should be in the feature vector.
    return encoded + [float(x) for x in features[7:]]


# Trigger the pipeline and show the encoded feature vector of the first record.
print(rdd.map(extract_features).first())
# print(map_index(0, '2'))
