package tech.zhaoxin

import org.apache.spark.mllib.classification.LogisticRegressionWithSGD
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author       王磊
 * @Date         2018/12/13
 * @ClassName    LogisticRegressionWithSGDScala
 * @Description  Scala版本Logistic回归算法实现数据预测demo
 * *************************************************
 * 测试数据如下：
 * 0,0 0 0 0
 * 0,0 0 0 1
 * 1,1 0 0 0
 * 1,2 1 0 0
 * 1,2 2 1 0
 * 0,2 0 1 1
 * 1,1 2 1 1
 * 0,0 1 0 0
 * 1,0 2 1 0
 * 1,2 1 1 0
 * 1,0 1 1 1
 * 1,1 1 0 1
 * 1,1 0 1 0
 * 0,2 1 0 1
 * **************************************************
 **/
object LogisticRegressionWithSGDScala {

  /**
   * Demo entry point: trains a logistic-regression-with-SGD model on the
   * raw features, then a second model on standardized features, and prints
   * predictions for two hand-built test vectors against each model.
   */
  def main(args: Array[String]): Unit = {
    // Load the training data as LabeledPoint(label, features).
    val sourceData = getSourceData
    // The RDD is consumed by several Spark jobs below (two trainings and the
    // scaler fit), so cache it to avoid re-reading/re-parsing the text file.
    sourceData.cache()

    // Train a logistic regression model with SGD, 20 iterations.
    // (The original comment said "linear regression"; this is logistic.)
    val model = LogisticRegressionWithSGD.train(sourceData, 20)

    // Print the learned weight vector.
    println("权重值列表如下：")
    model.weights.toArray.foreach(println)

    // Build test vectors and predict with the raw-feature model.
    val testData1 = Vectors.dense(1.0, 1.0, 1.0, 1.0)
    val testData2 = Vectors.dense(0.0, 0.0, 0.0, 1.0)
    val res1 = model.predict(testData1)
    val res2 = model.predict(testData2)
    println("测试数据" + testData1.toString + "预测结果为：" + res1)
    println("测试数据" + testData2.toString + "预测结果为：" + res2)

    // Fit a standard scaler (center to zero mean, scale to unit variance)
    // on the training features.
    val features = sourceData.map(_.features)
    val standardScalerModel = new StandardScaler(true, true).fit(features)

    // Re-label the training set with standardized features: the fitted scaler
    // constrains the transform so every vector is normalized consistently.
    val sdLabeledPoint = sourceData.map { lab =>
      LabeledPoint(lab.label, standardScalerModel.transform(lab.features))
    }

    // Train a second model on the standardized LabeledPoint data.
    val sdModel = LogisticRegressionWithSGD.train(sdLabeledPoint, 20)

    // BUG FIX: the test vectors must be transformed with the SAME scaler the
    // model was trained with — feeding raw vectors to a model trained on
    // standardized features yields meaningless predictions.
    val testData3 = standardScalerModel.transform(Vectors.dense(1.0, 1.0, 1.0, 1.0))
    val testData4 = standardScalerModel.transform(Vectors.dense(0.0, 0.0, 0.0, 1.0))
    val res3 = sdModel.predict(testData3)
    val res4 = sdModel.predict(testData4)
    println("标准化特征变量后，测试数据" + testData3.toString + "预测结果为：" + res3)
    println("标准化特征变量后，测试数据" + testData4.toString + "预测结果为：" + res4)

    // Release cluster resources; the context was created in getSourceData
    // and is reachable through any RDD it produced.
    sourceData.sparkContext.stop()
  }

  /**
   * Reads the training text file into an RDD and converts each line of the
   * form "label,f1 f2 f3 f4" (comma between label and features, spaces
   * between features) into a LabeledPoint.
   *
   * @return RDD of LabeledPoint ready for MLlib training
   */
  def getSourceData: RDD[LabeledPoint] = {
    // setIfMissing keeps the original cluster address as the default while
    // still allowing `spark-submit --master ...` to override it.
    val conf = new SparkConf()
      .setAppName("lineregression")
      .setIfMissing("spark.master", "spark://192.168.61.102:7077")
    val context = new SparkContext(conf)
    val sourceRDD = context.textFile("/opt/bigdata/bayes.txt")
    sourceRDD.map { line =>
      val arr = line.split(",")
      val label = arr(0).toDouble
      val features = arr(1).split(" ").map(_.toDouble)
      LabeledPoint(label, Vectors.dense(features))
    }
  }
}
