# coding=utf8
import sys

from numpy.ma import array
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorIndexer, StringIndexer, IndexToString
from pyspark.ml.linalg import Vectors
from pyspark.mllib.linalg import DenseVector, SparseVector
from pyspark.mllib.regression import LabeledPoint
from pyspark.sql import SparkSession
from pyspark import SQLContext, Row

from pyspark.ml.classification import DecisionTreeClassificationModel, DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator


def get_spark_inc():
    """Create (or reuse) a local SparkSession for this job.

    :return: SparkSession bound to all local cores, with the SQL
        warehouse directory placed next to this script.
    """
    # FIX: the original set "spark.work.memory"/"spark.work.cores", which
    # are not recognized Spark properties and were silently ignored.
    # "spark.driver.memory" is the documented key that sizes the single
    # JVM used in local mode; cores are already set via master("local[*]").
    # NOTE(review): driver memory only takes effect if no JVM has been
    # launched yet (i.e. this is the first getOrCreate) — confirm how the
    # script is submitted.
    spark = SparkSession \
        .builder \
        .master("local[*]") \
        .appName("Single_KNN") \
        .config("spark.sql.warehouse.dir", sys.path[0] + "/spark-warehouse") \
        .config("spark.sql.shuffle.partitions", 6) \
        .config("spark.driver.memory", "16g") \
        .getOrCreate()
    return spark


def load_mysql_data(spark,
                    url="jdbc:mysql://175.102.18.112:3306/kd_xapi?user=root&password=new.123",
                    dbtable="std_info"):
    """Load a MySQL table into a Spark DataFrame over JDBC.

    :param spark: active SparkSession.
    :param url: JDBC connection URL (default kept for backward
        compatibility with existing callers).
    :param dbtable: table to read; defaults to "std_info".
    :return: DataFrame with the table contents.
    """
    # SECURITY: credentials are embedded in the default URL; they belong
    # in configuration/environment, not in source control.
    df = spark.read.format('jdbc').options(url=url, dbtable=dbtable).load()
    return df


def df_write_to_mysql(df, table_name):
    """Append ``df`` to the given MySQL table over JDBC.

    :param df: DataFrame to persist.
    :param table_name: destination table name.

    BUG FIX: the original ignored ``table_name`` and always wrote to the
    hard-coded table "portrait_portraitxapipredictstdinfo"; the parameter
    is now honored.
    """
    # SECURITY: credentials are hard-coded here (and duplicated in the
    # URL); move them to configuration.
    df.write.mode("append").format("jdbc").options(
        url="jdbc:mysql://175.102.18.112:3306/kd_xapi?user=root&password=new.123",
        dbtable=table_name,
        user="root",
        password="new.123").save()


def process_registered(x):
    """Bucket the ``registered`` count into three levels.

    Per the original notes the observed distribution is roughly 40% /
    30% / 30% across the bands (the code's bands are <2, 2-4, 5+).

    :param x: row-like object with ``id`` and ``registered`` fields.
    :return: Row(id, registered_p) with registered_p in {0, 1, 2}.
    """
    count = int(x['registered'])
    if count < 2:
        level = 0
    elif count < 5:
        level = 1
    else:
        level = 2
    return Row(id=x['id'], registered_p=level)


def process_accessed(x):
    """Binarize the ``accessed`` count: 0 stays 0 (~78% of rows per the
    original notes), anything else becomes 1.

    :param x: row-like object with ``id`` and ``accessed`` fields.
    :return: Row(id, accessed_p) with accessed_p in {0, 1}.
    """
    flag = 0 if int(x['accessed']) == 0 else 1
    return Row(id=x['id'], accessed_p=flag)


def process_watched(x):
    """Binarize the ``watched`` count: values below 1 map to 0 (~80%+ of
    rows per the original notes), everything else to 1.

    :param x: row-like object with ``id`` and ``watched`` fields.
    :return: Row(id, watched_p) with watched_p in {0, 1}.
    """
    flag = 1 if int(x['watched']) >= 1 else 0
    return Row(id=x['id'], watched_p=flag)


def process_submitted(x):
    """Bucket the ``submitted`` count into three levels.

    Bands: 0-1 (~37%), 2-6 (~32%), 7+ (~31%) per the original notes.

    :param x: row-like object with ``id`` and ``submitted`` fields.
    :return: Row(id, submitted_p) with submitted_p in {0, 1, 2}.
    """
    count = int(x['submitted'])
    if count < 2:
        level = 0
    elif count < 7:
        level = 1
    else:
        level = 2
    return Row(id=x['id'], submitted_p=level)


def process_average_score(x):
    """Map ``average_score`` into three grade levels.

    0-59 -> 0 (poor), 60-89 -> 1 (average), 90+ -> 2 (excellent).
    NOTE(review): the original comment said the top band started at 96,
    but the code uses 90 — the code's behavior is preserved here.

    :param x: row-like object with ``id`` and ``average_score`` fields.
    :return: Row(id, average_score_p) with average_score_p in {0, 1, 2}.
    """
    score = int(x['average_score'])
    if score < 60:
        grade = 0
    elif score < 90:
        grade = 1
    else:
        grade = 2
    return Row(id=x['id'], average_score_p=grade)


def toVectors(x):
    """Build a {features, label, id} dict for the ML pipeline.

    Features are the dense vector [registered, submitted, posted,
    watched]; the label is the stringified ``average_score_p`` (the
    downstream StringIndexer works on strings).

    :param x: row-like object with the feature and id fields.
    :return: dict with keys 'features', 'label', 'id'.
    """
    features = Vectors.dense(
        [x['registered'], x['submitted'], x['posted'], x['watched']])
    return {
        'features': features,
        'label': str(x['average_score_p']),
        'id': x['id'],
    }


def toVectorsPlus(x):
    """Build a {features, label, id} dict like :func:`toVectors`, but
    keep the label as-is (no string conversion).

    :param x: row-like object with the feature and id fields.
    :return: dict with keys 'features', 'label', 'id'.
    """
    features = Vectors.dense(
        [x['registered'], x['submitted'], x['posted'], x['watched']]
    )
    return {
        'features': features,
        'label': x['average_score_p'],
        'id': x['id'],
    }


def predict_by_dt(df2, df_test_short):
    """Train a decision-tree classifier and predict levels for all rows.

    Relies on module-level globals defined in __main__: ``labelIndexer``,
    ``featureIndexer``, ``labelConverter``, ``df1`` and ``df_test_back``.

    :param df2: labeled training DataFrame (id/features/label).
    :param df_test_short: unlabeled DataFrame to predict (id/features/label).
    :return: DataFrame of (id, dt_predict_level) covering train + test rows.
    """
    # Build the decision-tree stage. Parameters can be set via setters or
    # a ParamMap (see the Spark MLlib docs); explainParams() lists the
    # available options.
    dtClassifier = DecisionTreeClassifier().setLabelCol("indexedLabel").setFeaturesCol("indexedFeatures")
    # Chain: label index -> feature index -> tree -> decode label.
    pipelinedClassifier = Pipeline().setStages([labelIndexer, featureIndexer, dtClassifier, labelConverter])
    # Fit the whole pipeline (this also fits the indexers).
    modelClassifier = pipelinedClassifier.fit(df2)
    # Score both the training set and the unlabeled set.
    predictionsClassifier_all = modelClassifier.transform(df2)
    predictionsClassifier_test = modelClassifier.transform(df_test_short)
    # Keep only the columns needed downstream.
    predictions_all = predictionsClassifier_all.select("id", "predictedLabel", "label", "features")
    predictions_test = predictionsClassifier_test.select("id", "predictedLabel", "label", "features")
    # Join predictions back onto the source rows (module globals).
    df_train = df1.join(predictions_all, "id")
    df_test = df_test_back.join(predictions_test, "id")
    # Combine train and test predictions into one DataFrame.
    df_predict = df_train.union(df_test)
    # Shape the result for the MySQL write-back.
    df_predict = df_predict.selectExpr("id", "predictedLabel as dt_predict_level")
    evaluatorClassifier = MulticlassClassificationEvaluator().setLabelCol("indexedLabel").setPredictionCol(
        "prediction").setMetricName("accuracy")
    # NOTE(review): accuracy is evaluated on the training set itself, so
    # the printed "Test Error" is really a training error.
    accuracy = evaluatorClassifier.evaluate(predictionsClassifier_all)
    print("Test Error = " + str(1.0 - accuracy))
    # Stage 2 of the fitted pipeline is the tree model.
    treeModelClassifier = modelClassifier.stages[2]
    print("Learned classification tree model:\n" + str(treeModelClassifier.toDebugString))
    return df_predict


def predict_by_rf(df2, df_test_short):
    """Train a random forest and predict levels for train + test rows.

    Relies on module-level globals defined in __main__: ``labelIndexer``,
    ``featureIndexer``, ``df1`` and ``df_test_back``.

    :param df2: labeled training DataFrame (id/features/label).
    :param df_test_short: unlabeled DataFrame to predict.
    :return: DataFrame of (id, rf_predict_level) covering train + test rows.
    """
    from pyspark.ml import Pipeline
    from pyspark.ml.classification import RandomForestClassifier
    from pyspark.ml.evaluation import MulticlassClassificationEvaluator

    # Forest over the indexed label/feature columns.
    forest = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", numTrees=10)

    # Decode numeric predictions back to the original label strings.
    converter = IndexToString(inputCol="prediction", outputCol="predictedLabel",
                              labels=labelIndexer.labels)

    # Indexers + forest + decoder chained into a single pipeline.
    rf_pipeline = Pipeline(stages=[labelIndexer, featureIndexer, forest, converter])

    # Fitting the pipeline also runs the indexers.
    fitted = rf_pipeline.fit(df2)

    # Score the training set and the unlabeled set.
    predictions_all = fitted.transform(df2)
    predictions_test = fitted.transform(df_test_short)

    # NOTE(review): accuracy is measured on the training data itself.
    evaluator = MulticlassClassificationEvaluator(
        labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
    accuracy = evaluator.evaluate(predictions_all)
    print("Test Error = %g" % (1.0 - accuracy))

    # Attach predictions to the source rows and combine both sets.
    joined_train = df1.join(predictions_all, "id")
    joined_test = df_test_back.join(predictions_test, "id")
    combined = joined_train.union(joined_test)
    result = combined.selectExpr("id", "predictedLabel as rf_predict_level")

    # Stage 2 of the fitted pipeline is the forest model.
    print(fitted.stages[2])  # summary only
    return result


def transform_labeled_pointed_0(x):
    """One-vs-rest relabeling for SVM: class 0 vs classes {1, 2}.

    Label 0.0 marks the target class (original label < 1), 1.0 the rest.

    :param x: row-like object with ``label`` and dense ``features``.
    :return: LabeledPoint with the binary label and the feature array.
    """
    is_target = int(x['label']) < 1
    return LabeledPoint(0.0 if is_target else 1.0, x['features'].toArray())


def transform_labeled_pointed_1(x):
    """One-vs-rest relabeling for SVM: class 1 vs classes {0, 2}.

    Label 0.0 marks the target class (original label == 1), 1.0 the rest.

    :param x: row-like object with ``label`` and dense ``features``.
    :return: LabeledPoint with the binary label and the feature array.
    """
    is_target = int(x['label']) == 1
    return LabeledPoint(0.0 if is_target else 1.0, x['features'].toArray())


def transform_labeled_pointed_2(x):
    """One-vs-rest relabeling for SVM: class 2 vs classes {0, 1}.

    Label 0.0 marks the target class (original label == 2), 1.0 the rest.

    :param x: row-like object with ``label`` and dense ``features``.
    :return: LabeledPoint with the binary label and the feature array.
    """
    is_target = int(x['label']) == 2
    return LabeledPoint(0.0 if is_target else 1.0, x['features'].toArray())





def predict_by_svm(df2, df_test_short):
    """Multiclass (3-way) prediction via three one-vs-rest binary SVMs.

    Trains one SVMWithSGD model per class, collects hard 0/1 predictions,
    then raw margin scores, and joins everything by id.
    NOTE: ``df_test_short`` is currently unused; only ``df2`` is scored.

    :param df2: DataFrame with ``id``, ``label`` and dense ``features``.
    :param df_test_short: reserved for the unlabeled set (not used yet).
    :return: DataFrame joining the three per-class predictions by id.
    """
    from pyspark.mllib.classification import SVMModel, SVMWithSGD

    # Relabel into three one-vs-rest problems (0-vs-rest, 1-vs-rest, 2-vs-rest).
    transform_rdd_0 = df2.rdd.map(transform_labeled_pointed_0)
    transform_rdd_1 = df2.rdd.map(transform_labeled_pointed_1)
    transform_rdd_2 = df2.rdd.map(transform_labeled_pointed_2)

    # Train one binary SVM per class.
    svm_0 = SVMWithSGD.train(transform_rdd_0, iterations=100)
    svm_1 = SVMWithSGD.train(transform_rdd_1, iterations=100)
    svm_2 = SVMWithSGD.train(transform_rdd_2, iterations=100)

    # Hard 0/1 predictions while the models still have their thresholds.
    predict_0 = df2.rdd.map(lambda y: Row(id=y['id'], label=y['label'], features=y['features'],
                                          prediction_0=svm_0.predict(y['features'].toArray()))).toDF()
    predict_1 = df2.rdd.map(lambda y: Row(id=y['id'], features=y['features'],
                                          prediction_1=svm_1.predict(y['features'].toArray()))).toDF()
    predict_2 = df2.rdd.map(lambda y: Row(id=y['id'], features=y['features'],
                                          prediction_2=svm_2.predict(y['features'].toArray()))).toDF()

    # Clear thresholds so subsequent predict() calls return raw margins.
    svm_0.clearThreshold()
    svm_1.clearThreshold()
    svm_2.clearThreshold()

    # Second pass: attach the raw margin score from each model.
    predict_0 = predict_0.rdd.map(
        lambda y: Row(id=y['id'], label=y['label'], prediction_0=y['prediction_0'], features=y['features'],
                      prediction_0_value=round(float(svm_0.predict(y['features'].toArray())), 2))).toDF()
    predict_1 = predict_1.rdd.map(lambda y: Row(id=y['id'], prediction_1=y['prediction_1'],
                                                prediction_1_value=float(
                                                    svm_1.predict(y['features'].toArray())))).toDF()
    predict_2 = predict_2.rdd.map(lambda y: Row(id=y['id'], prediction_2=y['prediction_2'],
                                                prediction_2_value=float(
                                                    svm_2.predict(y['features'].toArray())))).toDF()

    def sort_predict_last(x):
        """Pick a final class from the three one-vs-rest outputs."""
        predict_label_array = [x['prediction_0'], x['prediction_1'], x['prediction_2']]
        predict_value_array = [x['prediction_0_value'], x['prediction_1_value'], x['prediction_2_value']]

        try:
            # A hard prediction of 0 means "this model claims its class".
            index = predict_label_array.index(0)
            print(predict_label_array, predict_value_array, index)
            return Row(predict_label=index, id=x['id'])
        except ValueError:  # list.index raises ValueError when 0 is absent
            # No model claimed its class: rank the raw margins instead.
            # BUG FIX: the original did ``tmp_array = predict_value_array``
            # followed by an in-place sort, which mutated the very list the
            # ``.index()`` lookups below search — so first/second were
            # always 0 and 1. sorted() returns a new list and leaves
            # predict_value_array intact.
            tmp_array = sorted(predict_value_array, reverse=True)
            first_index = predict_value_array.index(tmp_array[0])
            second_index = predict_value_array.index(tmp_array[1])
            info_first = [0, 1, 2]
            info_first.pop(first_index)
            info_second = [0, 1, 2]
            info_second.pop(second_index)
            # The class excluded by both top scorers is the prediction.
            predict_label = [val for val in info_first if val in info_second][0]
            print(predict_label_array, predict_value_array, predict_label)
            return Row(predict_label=predict_label, id=x['id'])

    # Merge the three per-class outputs by id.
    df_predict = predict_0.join(predict_1, "id").join(predict_2, "id")
    df_predict.filter("prediction_0 = 1 and prediction_1 = 1 and prediction_2 = 1").show(100)

    return df_predict



def predict_by_nb(df2, df_test_short):
    """Train a multinomial Naive Bayes model and print held-out accuracy.

    Unlike the DT/RF helpers this returns nothing; ``df_test_short`` is
    unused — evaluation uses a random 70/30 split of ``df2``.

    :param df2: labeled DataFrame (features/label).
    :param df_test_short: unused.
    """
    from pyspark.ml.classification import NaiveBayes
    from pyspark.ml.evaluation import MulticlassClassificationEvaluator

    df2.show()

    # Random 70/30 split for training/evaluation.
    train_df, eval_df = df2.randomSplit([0.7, 0.3])

    model = NaiveBayes(smoothing=1.0, modelType="multinomial").fit(train_df)

    predictions = model.transform(eval_df)
    predictions.show()

    # Accuracy on the held-out 30%.
    evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction",
                                                  metricName="accuracy")
    accuracy = evaluator.evaluate(predictions)
    print("Test set accuracy = " + str(accuracy))


# Initialize the Spark session.
# NOTE(review): these statements run at import time, not only under
# __main__ — importing this module triggers Spark startup and a MySQL read.
spark = get_spark_inc()
# Pull the raw table from MySQL.
df = load_mysql_data(spark)
# Keep only the columns this job needs.
df1 = df.select("id", "registered", "accessed", "watched", "submitted", "downloaded", "posted", "average_score")
df_back = df1
# Rows with average_score == -1 are unlabeled and will be predicted.
df_test_back = df1.filter(" average_score = -1 ")
# Rows with a real score form the training set.
df1 = df1.filter(" average_score > -1 ")

if __name__ == "__main__":
    # Bucket average_score into levels for the training rows.
    processed_df = df1.select("id", "average_score")
    processed_df = processed_df.rdd.map(process_average_score).toDF()
    df1 = df1.join(processed_df, "id")
    # Same bucketing for the unlabeled rows.
    processed_df = df_test_back.select("id", "average_score")
    processed_df = processed_df.rdd.map(process_average_score).toDF()
    df_test_back = df_test_back.join(processed_df, "id")
    # And for the full (train + unlabeled) backup copy.
    processed_df = df_back.select("id", "average_score")
    processed_df = processed_df.rdd.map(process_average_score).toDF()
    df_back = df_back.join(processed_df, "id")

    df1.cache()
    df_test_back.cache()
    selected_df = df1.select("id", "registered", "accessed", "watched", "submitted", "downloaded", "posted",
                             "average_score_p")
    # Convert training rows to (id, features, label) records.
    df2 = selected_df.rdd.map(lambda x: Row(**toVectors(x))).toDF()
    # Build the test feature set the same way.
    df_test_short = df_test_back.rdd.map(lambda x: Row(**toVectors(x))).toDF()
    # df2.show()
    # Fit the shared indexers/converter consumed (as module globals) by
    # predict_by_dt and predict_by_rf — order matters here.
    labelIndexer = StringIndexer().setInputCol("label").setOutputCol("indexedLabel").fit(df2)
    featureIndexer = VectorIndexer().setInputCol("features").setOutputCol("indexedFeatures").fit(df2)
    labelConverter = IndexToString().setInputCol("prediction").setOutputCol("predictedLabel").setLabels(
        labelIndexer.labels)
    df_dt_predict = predict_by_dt(df2, df_test_short)
    print(df_dt_predict.count())
    df_rf_predict = predict_by_rf(df2, df_test_short)
    print(df_rf_predict.count())
    # df_svm_predict = predict_by_svm(df2, df_test_short)
    # df_predict = predict_by_nb(df3, df_test_short)
    # df_predict = df2.join(df_dt_predict, "id").join(df_rf_predict, "id")
    # Attach both models' predictions to the full row set and rename
    # columns for the destination table.
    df_results = df_back.join(df_dt_predict, "id").join(df_rf_predict, "id")
    df_results = df_results.selectExpr("id as std_id", "registered", "accessed", "watched", "submitted", "downloaded",
                          "posted", "average_score", "average_score_p as real_level",
                          "dt_predict_level", "rf_predict_level")
    df_results.show()
    print(df_results.count())
    # df_write_to_mysql(df_results, "portrait_portraitxapipredictstdinfo")

