package com.bj58.test

import java.text.SimpleDateFormat
import java.util.Calendar

import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.mllib.regression.{ IsotonicRegression, IsotonicRegressionModel }
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions.{col, expr, lit, udf}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import util.CalendarUtil

import scala.util.Try

/**
  * Created by 6v on 2018/12/15.
  */
object AdIncomeIsotonicPredict {

  // Parses/format dates like "20181215".
  // NOTE(review): SimpleDateFormat is not thread-safe. It is only touched inside
  // UDF closures here (serialized per Spark task), but confirm before sharing
  // this instance across threads in the driver.
  val formatter = new SimpleDateFormat("yyyyMMdd")

  /**
    * Trains an isotonic regression model on (label, feature) pairs read from a
    * local CSV file, prints the fitted boundaries and predictions, and reports
    * the mean squared error plus an R² goodness-of-fit on a held-out split.
    *
    * Input CSV format: one "label,feature" pair of doubles per line
    * (assumed from the parsing below — TODO confirm against the actual file).
    */
  def main(args: Array[String]): Unit = {

    val path = "C:\\Users\\lenovo\\Desktop\\ad_income.csv"

    val sparkConf = new SparkConf().setAppName("AdIncomeIsotonicPredict").setMaster("local[4]")
    val sc = new SparkContext(sparkConf)
    val spark = SparkSession.builder.getOrCreate()

    // Parse each line into the (label, feature, weight) triple required by
    // mllib's IsotonicRegression; every point gets unit weight.
    val parsedData = sc.textFile(path).map { line =>
      val parts = line.split(',').map(_.toDouble)
      (parts(0), parts(1), 1.0)
    }

    // Split the samples into 60% training / 40% test (fixed seed for
    // reproducibility).
    val splits = parsedData.randomSplit(Array(0.6, 0.4), seed = 11L)
    val training = splits(0)
    val test = splits(1)

    training.collect().foreach(t => println(t._1 + "\t" + t._2))
    test.collect().foreach(t => println(t._1 + "\t" + t._2))

    // Build and train a monotonically increasing (isotonic = true) model.
    val model = new IsotonicRegression().setIsotonic(true).run(training)
    println("boundaries" + "\t" + "predictions")
    model.boundaries.zip(model.predictions).foreach { case (b, p) =>
      println(b + "\t" + p)
    }

    // Error evaluation on the held-out split: predict from the feature
    // (point._2) and pair the prediction with the TRUE LABEL (point._1).
    // BUG FIX: the original paired the prediction with point._2 (the feature),
    // so the MSE and residuals below were computed against features, not labels.
    val predictionAndLabel = test.map { point =>
      (model.predict(point._2), point._1)
    }

    println("prediction" + "\t" + "label")
    predictionAndLabel.collect().foreach { case (p, l) => println(p + "\t" + l) }

    val meanSquaredError = predictionAndLabel.map { case (p, l) => math.pow(p - l, 2) }.mean()
    println("Mean Squared Error = " + meanSquaredError)

    // Goodness of fit: R² = 1 - SS_res / SS_tot.
    // BUG FIX: the original computed SS_tot around the mean of the
    // *predictions*; R² requires the total sum of squares of the true labels.
    val labelMean = predictionAndLabel.map(_._2).mean()
    val ssTot = predictionAndLabel.map { case (_, l) => math.pow(l - labelMean, 2) }.sum()
    val ssRes = predictionAndLabel.map { case (p, l) => math.pow(p - l, 2) }.sum()
    println(s"m:${ssTot}")
    println(s"p:${ssRes}")
    println(1 - ssRes / ssTot)

    // Release driver/executor resources before exiting.
    sc.stop()
  }

  /**
    * UDF expanding a "yyyyMMdd" string into a numeric feature vector:
    * [year, month, day, dayOfWeek, isWeekend].
    *
    * dayOfWeek is Calendar.DAY_OF_WEEK - 1, i.e. 0 = Sunday .. 6 = Saturday,
    * so isWeekend is 1.0 for Sunday (0) and Saturday (6).
    * Any unparseable input yields an all-zero vector instead of failing the job.
    */
  val solarUDF: UserDefinedFunction = udf((date: String) => {
    val value =
      Try {
        val myDate = formatter.parse(date)
        val c: Calendar = Calendar.getInstance
        c.setTime(myDate)
        val week = c.get(Calendar.DAY_OF_WEEK) - 1
        val isWeekend = if (week == 6 || week == 0) 1.0 else 0.0
        Array(date.substring(0, 4).toDouble, date.substring(4, 6).toDouble, date.substring(6, 8).toDouble, week.toDouble, isWeekend)
      }.getOrElse(Array(0.0, 0.0, 0.0, 0.0, 0.0))
    value
  })

  /**
    * UDF converting a solar-calendar date string into lunar-calendar components
    * via the project's CalendarUtil, returned as [year, month, day] doubles
    * (presumed meaning of the three elements — verify against CalendarUtil).
    * Any conversion failure yields [0.0, 0.0, 0.0] instead of failing the job.
    */
  val solarToLunarUDF: UserDefinedFunction = udf((date: String) => {
    val value =
      Try {
        val arr = CalendarUtil.solarToLunar(date)
        Array(arr(0).toDouble, arr(1).toDouble, arr(2).toDouble)
      }.getOrElse(Array(0.0, 0.0, 0.0))
    value
  })

}
