from pyspark import SparkContext
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree

from point import CLASS_B, CLASS_A
import os

from pyspark.mllib.classification import LogisticRegressionWithSGD, NaiveBayes, SVMWithSGD

# Make Spark worker processes use Python 3 (must match the driver interpreter).
os.environ['PYSPARK_PYTHON'] = "python3"

# Reuse an already-running SparkContext if one exists, otherwise create one.
sc = SparkContext.getOrCreate()

# Build the binary training set: CLASS_A points are labelled 1 (positive),
# CLASS_B points are labelled 0 (negative).
# NOTE(review): CLASS_A/CLASS_B come from the local `point` module; presumably
# iterables of feature vectors — confirm against that module.
labeledPoints = [LabeledPoint(1, pt) for pt in CLASS_A] + [LabeledPoint(0, pt) for pt in CLASS_B]
labeledPointRDD = sc.parallelize(labeledPoints)

# model = LogisticRegressionWithSGD.train(labeledPointRDD)


def compute_correct(labeledPointRDD, model):
    """Print the overall accuracy and error rate of *model* on *labeledPointRDD*.

    Args:
        labeledPointRDD: RDD[LabeledPoint] carrying the ground-truth labels.
        model: a trained MLlib classifier exposing ``predict``.

    Uses the MLlib-documented pattern of predicting on an RDD of feature
    vectors and zipping with the labels.  Calling ``model.predict`` inside a
    worker-side lambda (the original approach) fails for JVM-backed models
    such as DecisionTreeModel — presumably why the decision-tree experiment
    below collects to the driver first.
    """
    # (prediction, label) pairs.  zip() is safe here: both sides derive from
    # the same RDD via map(), so partitioning and element counts match.
    pairs = model.predict(labeledPointRDD.map(lambda pt: pt.features)).zip(
        labeledPointRDD.map(lambda pt: pt.label))

    # One countByValue() job replaces the original count() + filter().count()
    # pair, which recomputed every prediction a second time.
    counts = pairs.countByValue()
    total = sum(counts.values())
    correct = sum(n for (pred, label), n in counts.items() if pred == label)

    # Accuracy
    x = float(correct) / float(total)
    print("正确率：{}".format(x))

    # Error rate
    x = float(total - correct) / float(total)
    print("错误率: {}".format(x))

def comput_2(labeledPointRDD, model):
    # NOTE(review): name typo ("compute") kept for backward compatibility.
    """Print precision and recall of *model*, treating label 1 as positive.

    precision = TP / (TP + FP); recall = TP / (TP + FN).
    Assumes binary 0/1 labels, matching how the data is built above.

    Args:
        labeledPointRDD: RDD[LabeledPoint] carrying the ground-truth labels.
        model: a trained MLlib classifier exposing ``predict``.
    """
    # Predict on an RDD of feature vectors and zip with the labels — the
    # MLlib-documented pattern that also works for JVM-backed models, unlike
    # calling model.predict inside a worker-side lambda.
    pairs = model.predict(labeledPointRDD.map(lambda pt: pt.features)).zip(
        labeledPointRDD.map(lambda pt: pt.label))

    # One countByValue() job replaces the original three filter().count()
    # jobs, each of which recomputed every prediction.
    counts = pairs.countByValue()
    true_pos = sum(n for (p, l), n in counts.items() if p == 1 and p == l)
    false_pos = sum(n for (p, l), n in counts.items() if p == 1 and p != l)
    false_neg = sum(n for (p, l), n in counts.items() if p == 0 and p != l)

    # Guard the denominators: the original raised ZeroDivisionError when the
    # model predicted no positives, or the data contained no positives.
    if true_pos + false_pos:
        print("准确率: {}".format(float(true_pos) / float(true_pos + false_pos)))
    else:
        print("准确率: undefined (no positive predictions)")
    if true_pos + false_neg:
        print("召回率：{}".format(float(true_pos) / float(true_pos + false_neg)))
    else:
        print("召回率：undefined (no positive examples)")

# compute = comput_2

# print("逻辑回归：")
# compute(labeledPointRDD, model)
#
# print("贝叶斯:")
# Train a Naive Bayes classifier on the full labeled set; this is the model
# that the AUC evaluation below actually uses.
model = NaiveBayes.train(labeledPointRDD)
# compute(labeledPointRDD, model)
#
# print("SVM:")
# model = SVMWithSGD.train(labeledPointRDD)
# compute(labeledPointRDD, model)

# Decision-tree experiment: predictions are computed on the driver (after
# collect()) because DecisionTreeModel.predict cannot be called inside a
# worker-side lambda (JVM-backed model).
# print("决策树")
# model = DecisionTree.trainClassifier(labeledPointRDD, 2, {})
# predicated_values = sc.parallelize([(model.predict(pt.features), pt.label) for pt in labeledPointRDD.collect()])
# total = predicated_values.count()
# correct = predicated_values.filter(lambda pt: pt[0] == pt[1]).count()
# # Accuracy
# x = float(correct) / float(total)
# print("正确率：{}".format(x))
# # Error rate
# x = float(total - correct) / float(total)
# print("错误率: {}".format(x))


# compute_correct(labeledPointRDD, model)

# Inspect threshold-free quality metrics: area under the ROC curve and under
# the precision/recall curve, computed from (score, label) pairs.
# NOTE(review): model.predict inside this lambda works for NaiveBayesModel
# (Python-side predict) but would fail for JVM-backed models such as
# DecisionTreeModel — predict on an RDD of features instead if the model
# above is swapped.
metrics = BinaryClassificationMetrics(labeledPointRDD.map(lambda pt: (float(model.predict(pt.features)), pt.label)))
print(metrics.areaUnderROC)
print(metrics.areaUnderPR)