# coding=utf-8
# encoding:utf-8
from pyspark import SparkConf, RDD
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import LinearRegression, AFTSurvivalRegression
from pyspark.mllib.regression import LinearRegressionWithSGD, LabeledPoint
from pyspark.sql import SparkSession

if __name__ == '__main__':
    # Sanity check: build a dense ml.linalg vector from a flat list of floats.
    linear = [1.0, 1.0, 2.0, 1.0, 3.0, -1.0, 1.0, -2.0]
    vd = Vectors.dense(linear)
    print(type(vd), vd)
    # exit(0)

    # Local Spark session on 8 cores; the warehouse dir is a Windows temp path.
    conf = SparkConf() \
        .setMaster("local[8]") \
        .set('spark.sql.warehouse.dir', 'file:///d:/tmp')
    sparkS = SparkSession.builder.appName("Test PredictionTool").config(conf=conf).getOrCreate()
    sc = sparkS.sparkContext
    sc.setLogLevel("ERROR")  # keep console output readable

    # exit(0)

    """
    ========================== Linear Regression =============================
    """

    training = sparkS.read.format("libsvm") \
        .load("data/sample_linear_regression_data.txt")
    training.show(truncate=False)
    print(training.dtypes)
    print(type(training))
    lr = LinearRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)

    # Fit the model
    lrModel = lr.fit(training)

    # Print the coefficients and intercept for linear regression
    print("Coefficients: " + str(lrModel.coefficients))
    print("Intercept: " + str(lrModel.intercept))

    print(type(lr))
    print(type(lrModel))

    # exit(0)

    print("""
    ===============================================================
    """)

    # Load and parse the data
    def parse_point(line):
        """Parse one text line "label, x1, x2, ..." into a LabeledPoint.

        The first numeric value is the label; the rest are features.
        Commas are normalized to spaces, then split() is used so that runs
        of whitespace (e.g. from ", " separators) do not produce empty
        tokens — the original split(' ') would make float('') raise
        ValueError on such input.
        """
        values = [float(x) for x in line.replace(',', ' ').split()]
        return LabeledPoint(values[0], values[1:])


    # Read "y, x1, ..." lines and parse each one into a LabeledPoint RDD.
    data = sc.textFile("data/linear_data_y_x.csv")
    parsedData = data.map(parse_point)
    print(type(parsedData))
    for i in parsedData.collect():
        print(i)

    # Build the model (RDD-based mllib API); the tiny step keeps SGD from diverging.
    model = LinearRegressionWithSGD.train(parsedData, iterations=100, step=0.00000001)

    # Evaluate the model on training data: pair each true label with its prediction.
    valuesAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
    assert isinstance(parsedData, RDD)

    # Mean squared error over the training set.
    # NOTE: the original used a Python 2 tuple-parameter lambda
    # (lambda (v, p): ...), which is a SyntaxError in Python 3 (PEP 3113);
    # index the (label, prediction) pair explicitly instead.
    MSE = valuesAndPreds.map(lambda vp: (vp[0] - vp[1]) ** 2).reduce(lambda x, y: x + y) / valuesAndPreds.count()
    print("Mean Squared Error = " + str(MSE))

    # Pack each x into a 1-tuple as the feature vector, then print the prediction.
    for i in range(-50, 50, 1):
        t = tuple([i])
        result = model.predict(t)
        print('x:', i, 'y:', result)
    exit(0)

    """
    AFTSurvival -------------------
    """

    training = sparkS.createDataFrame([
        (1.218, 1.0, Vectors.dense(1.560, -0.605)),
        (2.949, 0.0, Vectors.dense(0.346, 2.158)),
        (3.627, 0.0, Vectors.dense(1.380, 0.231)),
        (0.273, 1.0, Vectors.dense(0.520, 1.151)),
        (4.199, 0.0, Vectors.dense(0.795, -0.226))], ["label", "censor", "features"])
    quantile_probabilities = [0.3, 0.6]
    aft = AFTSurvivalRegression(quantileProbabilities=quantile_probabilities,
                                quantilesCol="quantiles")

    model = aft.fit(training)

    # Print the coefficients, intercept and scale parameter for AFT survival regression
    print("Coefficients: " + str(model.coefficients))
    print("Intercept: " + str(model.intercept))
    print("Scale: " + str(model.scale))
    model.transform(training).show(truncate=False)
