package com.bj58.test

import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.functions.expr
import org.apache.spark.sql.functions.rank
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}

/**
  * Created by 6v on 2018/12/28.
  */
object AdMonth {

  /**
    * Trains a linear regression (Lasso: elasticNetParam = 1) on monthly ad
    * income, fitting on rows with year <= 2018 and predicting on later years.
    *
    * Input file: tab-separated lines of (year, month, income), read as raw
    * strings and cast to doubles.
    */
  def main(args: Array[String]): Unit = {

    val path = "C:\\Users\\lenovo\\Desktop\\mon1.csv"
    //    val negativeFilePath = "C:\\Users\\lenovo\\Desktop\\b.txt";

    val sparkConf = new SparkConf().setAppName("AdMonth").setMaster("local[4]")
    // Build the session from the conf directly; SparkSession owns its own
    // SparkContext, so the previous bare `new SparkContext(sparkConf)` (which
    // was never stopped) is unnecessary.
    val spark = SparkSession.builder.config(sparkConf).getOrCreate()

    try {
      // limit = -1 keeps trailing empty fields, so a line with a missing last
      // column still yields 3 fields instead of failing the schema at runtime.
      val data = spark.read.textFile(path).rdd
        .map(line => Row.fromSeq(line.split("\t", -1)))

      // Read everything as strings first; numeric casts happen below.
      val schema = StructType(
        Seq(
          StructField("year", StringType, nullable = true),
          StructField("month", StringType, nullable = true),
          StructField("income", StringType, nullable = true)
        )
      )

      val df = spark.createDataFrame(data, schema)
        .withColumn("year", expr("cast(year as double)"))
        .withColumn("month", expr("cast(month as double)"))
        .withColumn("income", expr("cast(income as double)"))
      df.show()
      println(df.schema)

      // Assemble (year, month) into the single vector column the model needs.
      val featureCols = Array("year", "month")
      val vecDF: DataFrame = new VectorAssembler()
        .setInputCols(featureCols)
        .setOutputCol("features")
        .transform(df)

      // Build the model: L1-regularized (elasticNetParam = 1) linear regression.
      val model = new LinearRegression()
        .setFeaturesCol("features")
        .setLabelCol("income")
        .setFitIntercept(true)
        .setMaxIter(500)
        .setRegParam(0.3)
        .setElasticNetParam(1)

      // Train on historical data only (year <= 2018).
      val lrModel = model.fit(vecDF.filter(row => row.getAs[Double]("year") <= 2018.0))

      // Print all model parameters (previously the result was discarded).
      println(lrModel.extractParamMap())
      // Print the coefficients and intercept for linear regression
      println(s"Coefficients: ${lrModel.coefficients} Intercept: ${lrModel.intercept}")

      // Predict on the held-out rows (year > 2018) and report training fit.
      val predictions = lrModel.transform(vecDF.filter(row => row.getAs[Double]("year") > 2018.0))
      val trainingSummary = lrModel.summary
      println(s"r2: ${trainingSummary.r2}")
      predictions.show(12)
    } finally {
      // Release the session even if reading or training throws.
      spark.stop()
    }
  }
}
