package com.atguigu.userprofile.ml.app

import org.apache.spark.mllib.classification.SVMWithSGD
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import java.util.Properties

/**
 * @author: liminghui
 * @date: 2022/6/26 13:50
 * @version: 1.0
 * @description:
 */
object TaoBao11Spark {

  /**
   * Trains an SVM (via SGD) on Taobao repeat-buyer data, scores the test
   * set, prints a sample of predictions, and appends the (score, label)
   * pairs to the MySQL table `dbtaobao.rebuy`.
   *
   * Expected CSV row layout (both files): col 0 is skipped, cols 1-3 are
   * the features, col 4 is the label — TODO confirm against the data files.
   */
  def main(args: Array[String]): Unit = {

    val sparkConf: SparkConf = new SparkConf().setAppName("SparkMlibTest")
      .setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder()
      .config(sparkConf)
      .config("hive.metastore.uris", "thrift://cdh01:9083")
      .config("spark.sql.warehouse.dir", "hdfs://cdh01:8020/user/hive/warehouse")
      .enableHiveSupport()
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    try {
      // Parse one CSV line into a LabeledPoint (label = col 4, features = cols 1-3).
      // Shared by both the training and the test set so the layouts cannot drift.
      def toLabeledPoint(line: String): LabeledPoint = {
        val parts = line.split(',')
        LabeledPoint(
          parts(4).toDouble,
          Vectors.dense(parts(1).toDouble, parts(2).toDouble, parts(3).toDouble)
        )
      }

      val train: RDD[LabeledPoint] =
        sc.textFile("F:\\data_format\\train_after.csv").map(toLabeledPoint)
      val test: RDD[LabeledPoint] =
        sc.textFile("F:\\data_format\\test_after.csv").map(toLabeledPoint)

      val numIterations = 1000
      val model = SVMWithSGD.train(train, numIterations)

      // With the threshold cleared, predict() returns the raw margin score
      // instead of a hard 0/1 label.
      model.clearThreshold()
      val scoreAndLabels: RDD[String] = test.map { point =>
        val score = model.predict(point.features)
        score + " " + point.label
      }
      scoreAndLabels.take(10).foreach(println)

      // Repeat-buyer predictions split back into (score, label) pairs.
      val rebuyRDD: RDD[Array[String]] = scoreAndLabels.map(_.split(" "))
      // mkString is required here: printing an Array directly would emit its
      // JVM reference (e.g. "[Ljava.lang.String;@1a2b3c"), not its contents.
      rebuyRDD.take(10).foreach(pair => println(pair.mkString(" ")))

      // Schema for the output DataFrame: both columns stored as strings.
      val schema: StructType = StructType(List(
        StructField("score", StringType, true),
        StructField("label", StringType, true)
      ))
      // One Row per prediction, whitespace-trimmed.
      val rowRDD: RDD[Row] = rebuyRDD.map(p => Row(p(0).trim, p(1).trim))
      val rebuyDF: DataFrame = spark.createDataFrame(rowRDD, schema)

      // JDBC connection parameters for the target MySQL instance.
      val prop = new Properties()
      prop.put("user", "root")
      prop.put("password", "123456")
      // NOTE(review): com.mysql.jdbc.Driver is the legacy Connector/J 5.x class;
      // Connector/J 8+ uses com.mysql.cj.jdbc.Driver — confirm the driver version on the classpath.
      prop.put("driver", "com.mysql.jdbc.Driver")
      // Append the scored rows to the rebuy table of database dbtaobao.
      rebuyDF.write.mode("append").jdbc("jdbc:mysql://localhost:3306/dbtaobao", "dbtaobao.rebuy", prop)
    } finally {
      // Release the local Spark resources even if the job fails partway through.
      spark.stop()
    }
  }

}
