from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml import Pipeline
from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer
from pyspark.ml.classification import LogisticRegression
# Single-process local Spark session for this example script.
spark = SparkSession.builder.master('local').appName('logisticRegression').getOrCreate()


def load_data(x):
    """Turn one split CSV record into a dict holding a dense feature vector and a string label.

    The first four fields are parsed as floats (sepal/petal measurements);
    the fifth field is kept as the class-label string.
    """
    features = Vectors.dense([float(value) for value in x[:4]])
    return {'features': features, 'label': str(x[4])}
# Read the Iris CSV, split each line into fields, and build a DataFrame of Rows.
raw_lines = spark.sparkContext.textFile('./Iris.txt')
row_rdd = raw_lines.map(lambda line: line.split(',')).map(lambda fields: Row(**load_data(fields)))
data = row_rdd.toDF()
data.show()

# Index the string label and the feature vector into numeric/indexed columns.
labelIndexer = StringIndexer(inputCol='label', outputCol='indexedLabel').fit(data)
featureIndexer = VectorIndexer(inputCol='features', outputCol='indexedFeatures').fit(data)

# Elastic-net regularized logistic regression over the indexed columns.
lr = LogisticRegression(
    labelCol='indexedLabel',
    featuresCol='indexedFeatures',
    maxIter=100,
    regParam=0.3,
    elasticNetParam=0.8,
)
print(lr.explainParams())


# Map predicted label indices back to the original string labels.
labelConverter = IndexToString(
    inputCol='prediction',
    outputCol='predictedLabel',
    labels=labelIndexer.labels,
)
pipe = Pipeline(stages=[labelIndexer, featureIndexer, lr, labelConverter])

# 70/30 random train/test split, then fit the full pipeline and score the test set.
trainingData, testData = data.randomSplit([0.7, 0.3])
lrPipelineModel = pipe.fit(trainingData)
lrPredictions = lrPipelineModel.transform(testData)

# Print each test row's true label, features, class probabilities, and prediction.
preRel = lrPredictions.select('predictedLabel', 'label', 'features', 'probability').collect()
for item in preRel:
    # f-string replaces the original concatenation, which was missing the '='
    # separator after 'predictedLabel' (output read ",predictedLabelIris-setosa").
    print(f"{item['label']},{item['features']} probability={item['probability']},predictedLabel={item['predictedLabel']}")

# Score the predictions with the evaluator's default multiclass metric.
evaluator = MulticlassClassificationEvaluator(labelCol='indexedLabel', predictionCol='prediction')
score = evaluator.evaluate(lrPredictions)
print(score)