from pyspark import SparkContext
from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
from pyspark.mllib.tree import DecisionTree
from pyspark.sql.session import SparkSession
from pyspark.sql.types import Row
import os

# Force PySpark driver/worker processes to use the "python3" interpreter.
os.environ['PYSPARK_PYTHON'] = "python3"

spark = SparkSession.builder.getOrCreate()

# Read the tab-separated training data. No schema is supplied and
# inferSchema is off, so every column arrives as a string — clean_data
# below is responsible for the numeric conversion.
# NOTE(review): hard-coded absolute local path — must be adjusted per machine.
df = spark.read.csv("file:///Users/sonto/Workspace/P1905/spark_example/train.tsv",
                    header=True, sep="\t")


# def get_cateogry(s):
#     categories = ("recreation", "computer_internet", "computer_internet", "")

def clean_data(data):
    """Convert one raw TSV row into numeric values, label last.

    The first four columns are skipped (non-numeric metadata). Every
    remaining column is parsed as a float; '?' placeholders and negative
    values are mapped to 0.0, and the final column is cast to int (it is
    used as the class label downstream).

    Args:
        data: any indexable sequence of string fields (e.g. a pyspark Row;
            previously this was enforced with an ``assert``, which is
            stripped under ``-O`` — duck typing via ``list(data)`` is
            both safer and more general).

    Returns:
        list: cleaned values for columns 4..end; the last item is the
        int label, the rest are non-negative floats.
    """
    items = list(data)
    for i in range(4, len(items)):
        # '?' marks a missing value in the raw data; treat it as 0.
        value = 0.0 if items[i] == '?' else float(items[i])
        # Clamp negatives to 0.0 (NaiveBayes requires non-negative
        # features); always store a float for type consistency.
        items[i] = value if value > 0 else 0.0

    # The final column is the binary class label.
    items[-1] = int(items[-1])
    return items[4:]


# Build (label, features) pairs from the cleaned rows: the last cleaned
# value is the label, everything before it is the feature vector.
def _to_labeled_point(values):
    return LabeledPoint(values[-1], values[:-1])

labeledPoint = df.rdd.map(clean_data).map(_to_labeled_point)
# Sanity check: show the feature vector of the first record.
first_point = labeledPoint.take(1)[0]
print(first_point.features)

# Logistic regression / decision tree classification experiments
# (the original comment said "linear regression", but the code below
# uses LogisticRegressionWithSGD)
# model = LogisticRegressionWithSGD.train(labeledPoint)
# featureRDD = labeledPoint.map(lambda pt: pt.features)
# model = DecisionTree.trainClassifier(labeledPoint, numClasses=2, categoricalFeaturesInfo={})
# predicted_values = [(float(model.predict(pt.features)), pt.label) for pt in labeledPoint.collect()]
# metrics = BinaryClassificationMetrics(spark.sparkContext.parallelize(predicted_values))

# Train a Naive Bayes classifier with additive smoothing lambda_=2.0.
model = NaiveBayes.train(labeledPoint, lambda_=2.0)


# Pair each prediction (as float score) with the true label for AUC.
# NOTE(review): evaluation runs on the training set itself — no held-out split.
def _score_with_label(point):
    return (float(model.predict(point.features)), point.label)


score_and_label = labeledPoint.map(_score_with_label)
metrics = BinaryClassificationMetrics(score_and_label)
# Alternative model kept for reference:
# model = LogisticRegressionWithSGD.train(labeledPoint)
# metrics = BinaryClassificationMetrics(labeledPoint.map(lambda pt: (float(model.predict(pt.features)), pt.label)))
print(metrics.areaUnderROC)

# matrix = RowMatrix(featureRDD)
# summary = matrix.computeColumnSummaryStatistics()
# mean = spark.sparkContext.broadcast(summary.mean())
# variance = spark.sparkContext.broadcast(summary.variance())
#
#
# import numpy as np
# newLabeledPointRDD = labeledPoint.map(lambda pt: (pt.label, pt.features - mean.value)).map(lambda pt: LabeledPoint(pt[0], pt[1]/np.sqrt(variance.value)))
# model1 = LogisticRegressionWithSGD.train(newLabeledPointRDD)
# metrics = BinaryClassificationMetrics(newLabeledPointRDD.map(lambda pt: (float(model1.predict(pt.features)), pt.label)))
# print(metrics.areaUnderROC)

# metrics = BinaryClassificationMetrics(labeledPoint.map(lambda pt: (float(model.predict(pt.features)), pt.label)))
# print(metrics.areaUnderROC)


# d = model.predict([0.789131,2.055555556,0.676470588,0.205882353,0.047058824,0.023529412,0.443783175,0.0,0.0,0.09077381,0.0,0.245831182,0.003883495,1.0,1.0,24.0,0.0,5424.0,170.0,8.0,0.152941176,0.079129575])

# Support vector machine (SVM) classification model
# model = SVMWithSGD.train(labeledPoint)
# d = model.predict([0.789131,2.055555556,0.676470588,0.205882353,0.047058824,0.023529412,0.443783175,0.0,0.0,0.09077381,0.0,0.245831182,0.003883495,1.0,1.0,24.0,0.0,5424.0,170.0,8.0,0.152941176,0.079129575])

# Naive Bayes classification model
# model = NaiveBayes.train(labeledPoint)
# d = model.predict([0.789131,2.055555556,0.676470588,0.205882353,0.047058824,0.023529412,0.443783175,0.0,0.0,0.09077381,0.0,0.245831182,0.003883495,1.0,1.0,24.0,0.0,5424.0,170.0,8.0,0.152941176,0.079129575])

# Decision tree classification model
# model = DecisionTree.trainClassifier(labeledPoint, 2, {})
# d = model.predict([0.789131,2.055555556,0.676470588,0.205882353,0.047058824,0.023529412,0.443783175,0.0,0.0,0.09077381,0.0,0.245831182,0.003883495,1.0,1.0,24.0,0.0,5424.0,170.0,8.0,0.152941176,0.079129575])

