"""
    机器学习算法：决策树与随机森林
        1、决策树
            一个决策树包含三种类型的节点：
                决策节点：通常用矩形框来表示
                机会节点：通常用圆圈来表示
                终结点：通常用三角形来表示
            公式：
                N:用N来表示训练用例（样本）的个数，M表示特征数目。
                m:输入特征数目m，用于确定决策树上的一个节点的决策结果；其中m应该小于M。
                从N个训练用例（样本）中以有放回抽样的方式，取样N次，形成一个训练集（即bootstrap取样），并用未抽到的用例（样本）作预测，评估其误差
                对于每一个节点，随机选择m个特征，决策树上每个节点的决定都是基于这些特征确定的。根据这m个特征，计算其最佳的分裂方式
                每棵树都会完整地成长而不会剪枝
        2、随机森林

"""
from __future__ import print_function, division
from pyspark.ml.classification import LogisticRegression
from pyspark.sql import SparkSession

from pyspark.mllib.tree import DecisionTree, DecisionTreeModel
from pyspark.mllib.util import MLUtils

# Build a local SparkSession with Hive support and grab its SparkContext.
spark = (
    SparkSession.builder
    .master("local")
    .appName("test")
    .enableHiveSupport()
    .getOrCreate()
)
sc = spark.sparkContext

# Load the sample data set in LibSVM format as an RDD of LabeledPoint.
data = MLUtils.loadLibSVMFile(sc, "E:\\Python\\pyspark_demo01\\pyspark_data\\sample_libsvm_data.txt")

# Randomly split the data: ~70% for training, ~30% for testing.
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# The original bare tuple expression computed both counts and discarded
# them (a no-op outside a notebook); print so the split sizes are visible.
print(trainingData.count(), testData.count())

# Peek at the first few records; take() returns a plain list, so print it.
print(data.take(3))
# Convert the RDD of LabeledPoint to a DataFrame for a tabular view.
data.toDF().show()

# 建立一个分类器
# Train a binary decision-tree classifier on the training split.
#   trainingData            -- RDD of LabeledPoint to learn from
#   numClasses=2            -- number of target classes
#   categoricalFeaturesInfo -- empty dict: treat every feature as continuous
#   impurity='gini'         -- split-quality criterion
#   maxDepth=5              -- maximum depth of the tree
#   maxBins=32              -- maximum number of bins when discretizing features
model = DecisionTree.trainClassifier(
    trainingData,
    numClasses=2,
    categoricalFeaturesInfo={},
    impurity='gini',
    maxDepth=5,
    maxBins=32,
)

# Evaluate the model on the held-out test split: predict on the feature
# vectors, pair each prediction with its true label, and report the
# fraction of mismatches as the test error.
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
mismatches = labelsAndPredictions.filter(lambda pair: pair[0] != pair[1])
testErr = mismatches.count() / float(testData.count())

print("误差有多大 = " + str(testErr))
print("Learned classification tree model:")
print(model.toDebugString())

# Persist the trained model to disk.
# NOTE(review): save() raises if the target directory already exists — TODO
# confirm whether re-running this script is expected to work.
model.save(sc, "E:\\Python\\pyspark_demo01\\out_dat\\jiqi_model\\jueceshu_model01")

# Reload the saved model and verify it behaves identically to the original.
# Fixes: renamed the 'sameNodel' typo to sameModel, and print the *loaded*
# model's structure (the original re-printed `model` by copy-paste mistake).
sameModel = DecisionTreeModel.load(sc, "E:\\Python\\pyspark_demo01\\out_dat\\jiqi_model\\jueceshu_model01")
predictions_from_sameModel = sameModel.predict(testData.map(lambda x: x.features))
labelsAndPredictions_from_sameModel = testData.map(lambda lp: lp.label).zip(predictions_from_sameModel)
testErr = labelsAndPredictions_from_sameModel.filter(
    lambda lp: lp[0] != lp[1]).count() / float(testData.count())
print("误差有多大 = " + str(testErr))
print("Learned classification tree model:")
print(sameModel.toDebugString())

print(predictions.take(5))
print(predictions_from_sameModel.take(5))
# The reloaded model must produce exactly the same predictions.
print(predictions.collect() == predictions_from_sameModel.collect())

# ---- Random forest (DataFrame-based pyspark.ml API) ----
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

# Reload the same file through the DataFrame libsvm reader and re-split it.
data = spark.read.format("libsvm").load("E:\\Python\\pyspark_demo01\\pyspark_data\\sample_libsvm_data.txt")
(trainingData, testData) = data.randomSplit([0.7, 0.3])

# Fit a random-forest classifier:
#   labelCol    -- column holding the target label
#   featuresCol -- column holding the feature vectors
#   numTrees=5  -- number of trees in the forest
rf = RandomForestClassifier(
    labelCol="label",
    featuresCol="features",
    numTrees=5,
).fit(trainingData)

# Score the test split and measure multiclass accuracy; report the
# complement (error rate).
predictions = rf.transform(testData)
evaluator = MulticlassClassificationEvaluator(
    labelCol="label",
    predictionCol="prediction",
    metricName="accuracy",
)
accuracy = evaluator.evaluate(predictions)
print("出错率：{}".format(1.0 - accuracy))
