"""Demo of Spark ML's VectorIndexer.

Fits a VectorIndexer on a tiny DataFrame of dense feature vectors,
prints which feature columns it decided are categorical, and shows
the transformed output.
"""
from pyspark.sql import SparkSession
from pyspark.ml.feature import VectorIndexer
from pyspark.ml.linalg import Vectors, Vector

spark = SparkSession.builder.master('local').appName('w2v').getOrCreate()

try:
    # Sample data: each row is a single dense feature vector.
    df = spark.createDataFrame([
        (Vectors.dense(-1.0, 1.0, 1.0),),
        (Vectors.dense(-1.0, 3.0, 1.0),),
        (Vectors.dense(0.0, 5.0, 1.0),),
    ], ["features"])

    # Build the transformer. A feature column with at most `maxCategories`
    # distinct values is treated as categorical and re-indexed to 0..k-1;
    # columns with more distinct values are left as continuous.
    # Here: column 0 has 2 distinct values and column 2 has 1, so both are
    # indexed; column 1 has 3 distinct values and stays continuous.
    indexer = VectorIndexer(inputCol="features", outputCol="indexed",
                            maxCategories=2)
    indexer_model = indexer.fit(df)

    # categoryMaps maps each categorical feature index to its value->index map.
    categorical_features = indexer_model.categoryMaps.keys()
    print(f"chose {len(categorical_features)} features: "
          f"{sorted(categorical_features)}")

    indexed = indexer_model.transform(df)
    indexed.show()
finally:
    # Always release the SparkContext and its resources.
    spark.stop()