#pyspark.ml.classification.NaiveBayes(self, featuresCol="features", labelCol="label", predictionCol="prediction",\ 
#probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, modelType="multinomial", \
#thresholds=None, weightCol=None)

def NaiveBayes(testdf, traindf):
    """Fit a weighted multinomial Naive Bayes model and round-trip it to disk.

    Parameters
    ----------
    testdf : pyspark.sql.DataFrame
        Rows with a ``features`` vector column to score.
    traindf : pyspark.sql.DataFrame
        Training data; must contain ``label``, ``features`` and a
        ``weight`` column (``weightCol="weight"`` is set below).

    Returns
    -------
    tuple
        ``(model, result)`` — the fitted ``NaiveBayesModel`` and
        ``testdf`` with ``prediction``/``probability``/``rawPrediction``
        columns appended by ``transform``.

    Notes
    -----
    Relies on a module-level ``temp_path`` for the save/load round-trip.
    NOTE(review): ``temp_path`` is not defined in this file — confirm the
    caller provides it before the save section runs.
    """
    # Local import: the class deliberately shadows this function's name
    # inside the body only; the module-level name stays the function.
    from pyspark.ml.classification import NaiveBayes, NaiveBayesModel

    # modelType may be "multinomial" or "bernoulli"; smoothing=1.0 is
    # plain Laplace (add-one) smoothing.
    nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
    model = nb.fit(traindf)

    # Learned parameters, for reference:
    #   model.pi    -> class log-priors, e.g. DenseVector([-0.81..., -0.58...])
    #   model.theta -> per-feature conditional log-probs,
    #                  e.g. DenseMatrix(2, 2, [-0.91..., -0.51..., ...], 1)

    # Score the test set; adds prediction/probability/rawPrediction columns.
    result = model.transform(testdf)

    # Persist and reload the estimator's params (e.g. smoothing stays 1.0).
    nb_path = temp_path + "/nb"
    nb.save(nb_path)
    nb2 = NaiveBayes.load(nb_path)

    # Persist and reload the fitted model. NaiveBayesModel is now imported
    # above — the original raised NameError here.
    model_path = temp_path + "/nb_model"
    model.save(model_path)
    model2 = NaiveBayesModel.load(model_path)

    return model, result

