package com.etc

import org.apache.spark.mllib.classification.LogisticRegressionWithSGD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.feature.HashingTF
import org.apache.spark.mllib.regression.LabeledPoint

/**
 * Spam-vs-ham demo: trains a logistic-regression classifier (SGD) on two
 * small text files and prints predictions for one spam-like and one
 * normal-looking message.
 */
object testLogisticRegressionWithSGD {
  def main(args: Array[String]): Unit = {

    // BUG FIX: "sgd" is not a valid Spark master URL (it was the algorithm
    // name, not a master). Use local mode with all available cores.
    val conf = new SparkConf().setAppName("test").setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Each file holds one email per line; minPartitions = 1 keeps the tiny
    // dataset in a single partition.
    val spam = sc.textFile("src/main/resources/mllib/spam.txt", 1)
    val normal = sc.textFile("src/main/resources/mllib/normal.txt", 1)

    // HashingTF maps each email's words into a 10,000-dimension
    // term-frequency feature vector (hashing trick, no vocabulary needed).
    val tf = new HashingTF(numFeatures = 10000)

    // Tokenize naively on spaces and hash into feature vectors.
    val spamFeatures = spam.map { email => tf.transform(email.split(" ")) }
    val normalFeatures = normal.map { email => tf.transform(email.split(" ")) }

    // Build LabeledPoint datasets: label 1 = spam (positive), 0 = normal (negative).
    val positiveExamples = spamFeatures.map { features => LabeledPoint(1, features) }
    val negativeExamples = normalFeatures.map { features => LabeledPoint(0, features) }
    val trainingData = positiveExamples.union(negativeExamples)
    // SGD is iterative, so cache the training set to avoid re-reading the files.
    trainingData.cache()
    println(trainingData.toDebugString)

    // Train logistic regression with stochastic gradient descent.
    val model = new LogisticRegressionWithSGD().run(trainingData)

    // Score one spam-like and one normal-looking example through the same
    // hashing transform used for training.
    val posTest = tf.transform("O M G get cheap stuff by sending money to .".split(" "))
    val negTest = tf.transform("hello, i started studying Spark ".split(" "))
    // BUG FIX: corrected output typos ("tset" -> "test", "negitive" -> "negative").
    println(s"prediction for positive test example: ${model.predict(posTest)}")
    println(s"prediction for negative test example: ${model.predict(negTest)}")

    // NOTE(review): sleeping forever presumably keeps the Spark web UI
    // alive for inspection; the driver must be killed manually. Consider
    // replacing with sc.stop() once the UI is no longer needed.
    Thread.sleep(Int.MaxValue)

  }

}
