from pyspark import SparkConf, SparkContext
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression, LogisticRegressionModel, BinaryLogisticRegressionSummary
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer, HashingTF, Tokenizer
from pyspark.ml.linalg import Vector, Vectors
from pyspark.sql import Row, SparkSession, functions

def f(x):
    """Map one split CSV record to a features/label dict.

    Expects ``x`` to be a sequence of at least five string fields
    (the four Iris measurements followed by the species name).
    Returns a dict suitable for ``Row(**...)`` construction.
    """
    measurements = [float(value) for value in x[:4]]
    return {
        'features': Vectors.dense(*measurements),
        'label': str(x[4]),
    }

if __name__ == '__main__':
    # NOTE: app name kept as-is from the original; it looks like a
    # copy-paste leftover ('wordcount') — confirm before renaming.
    conf = SparkConf().setMaster('local').setAppName('wordcount')

    # A SparkSession is required: RDD.toDF() is only attached to RDDs
    # once a SparkSession has been created, so calling it with just a
    # bare SparkContext raises AttributeError.
    spark = SparkSession.builder.config(conf=conf).getOrCreate()
    sc = spark.sparkContext

    try:
        # Parse the Iris CSV into Rows (features vector + string label)
        # and promote to a DataFrame.
        data = (
            sc.textFile('./Iris.txt')
            .map(lambda line: line.split(','))
            .map(lambda p: Row(**f(p)))
            .toDF()
        )

        data.show()
    finally:
        # Stops the session and its underlying SparkContext.
        spark.stop()