from pyspark import SparkContext
from pyspark.mllib.classification import LogisticRegressionWithSGD, NaiveBayes
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.mllib.feature import StandardScaler
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree
from point import CLASS_A, CLASS_B
import os

import numpy as np

os.environ['PYSPARK_PYTHON'] = "python3"

sc = SparkContext.getOrCreate()

# Build a binary-labeled dataset: points from CLASS_A get label 1,
# points from CLASS_B get label 0.
labeled_points = [LabeledPoint(1, pt) for pt in CLASS_A] + [LabeledPoint(0, pt) for pt in CLASS_B]
labeled_point_rdd = sc.parallelize(labeled_points)

# Train a logistic-regression classifier with SGD on the labeled data.
model = LogisticRegressionWithSGD.train(labeled_point_rdd)

# Pair each prediction with its true label so BinaryClassificationMetrics
# can score the model.
# NOTE(review): foreach(print) executes on the executors, so in a real
# cluster this output lands in the worker logs, not the driver console.
score_and_labels = labeled_point_rdd.map(lambda pt: (float(model.predict(pt.features)), pt.label))
score_and_labels.foreach(print)
metric = BinaryClassificationMetrics(score_and_labels)
# # Area under the ROC curve (AUC)
# print(metric.areaUnderROC)
# # Area under the precision-recall curve
# print(metric.areaUnderPR)

# Column-wise statistics over the raw feature vectors.
feature_rdd = labeled_point_rdd.map(lambda pt: pt.features)
matrix = RowMatrix(feature_rdd)

# Covariance matrix of the feature columns.
matrix.computeCovariance()
matrix_summary = matrix.computeColumnSummaryStatistics()
# Per-column mean
print(matrix_summary.mean())
# Per-column variance
print(matrix_summary.variance())
# Per-column L1 norm (sum of absolute values)
print(matrix_summary.normL1())
# Per-column L2 (Euclidean) norm
print(matrix_summary.normL2())

# Broadcast the statistics so the standardization lambdas below capture
# small read-only values instead of the summary object itself.
mean = sc.broadcast(matrix_summary.mean())
variance = sc.broadcast(matrix_summary.variance())

# Manual standardization, (x - mean) / stddev, for comparison with the
# StandardScaler result below.
# NOTE(review): assumes every column has nonzero variance — zero-variance
# columns would divide by zero here.
print(feature_rdd.map(lambda v: v - mean.value).map(lambda v: v / np.sqrt(variance.value)).first())

# Library standardization: withMean=True centers each column; the default
# withStd=True also scales each column to unit standard deviation.
scaler = StandardScaler(withMean=True).fit(feature_rdd)
print(scaler.transform(feature_rdd).first())