package com.km.algorithm

/**
  * Spark MLlib example: a hand-rolled Pearson chi-squared goodness-of-fit
  * test on observed/expected vectors, followed by column summary statistics
  * and a covariance matrix computed through a distributed RowMatrix.
  */

import org.apache.commons.math3.distribution.ChiSquaredDistribution
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.linalg._
import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.mllib.stat.test.ChiSqTestResult
//import org.apache.spark.mllib.stat.test.ChiSqTest.NullHypothesis.Value
//import org.apache.spark.mllib.stat.test.ChiSqTest.PEARSON
//import org.apache.spark.mllib.stat.test.ChiSqTest._
//import org.apache.spark.mllib.stat.test.ChiSqTestResult
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
//import spark.implicits._;

object CovarianceExample {

  //  private def methodFromString(methodName: String): Method = {
  //    methodName match {
  //      case PEARSON.name => PEARSON
  //      case _ => throw new IllegalArgumentException("Unrecognized method for Chi squared test.")
  //    }
  //  }

  /**
    * A chi-squared test method: pairs a method name with the function that
    * computes one cell's contribution to the test statistic.
    *
    * @param name      method identifier (e.g. "pearson")
    * @param chiSqFunc maps (observed, expected) to that cell's contribution
    */
  case class Method(name: String, chiSqFunc: (Double, Double) => Double)

  /** Pearson's chi-squared: (observed - expected)^2 / expected per cell. */
  val PEARSON = Method("pearson", (observed: Double, expected: Double) => {
    val dev = observed - expected
    dev * dev / expected
  })

  /**
    * Resolves a method name to its [[Method]] implementation.
    *
    * @param methodName case-sensitive name; only "pearson" is supported
    * @throws IllegalArgumentException if the name is not recognized
    */
  private def methodFromString(methodName: String): Method = {
    methodName match {
      case PEARSON.name => PEARSON
      case _ => throw new IllegalArgumentException(
        s"Unrecognized method for Chi squared test: $methodName")
    }
  }

  /** Null hypotheses reported alongside chi-squared test results. */
  object NullHypothesis extends Enumeration {
    type NullHypothesis = Value
    // Hypothesis for the goodness-of-fit variant of the test.
    val goodnessOfFit: NullHypothesis = Value("observed follows the same distribution as expected.")
    // Hypothesis for the test-of-independence variant.
    val independence: NullHypothesis = Value("the occurrence of the outcomes is statistically independent.")
  }


  /**
    * Converts a local mllib [[Matrix]] into a row-oriented RDD of dense vectors.
    *
    * `Matrix.toArray` is column-major, so the flat array is first regrouped
    * into columns of length `numRows`, then transposed to recover the rows.
    *
    * @param m  local matrix to distribute
    * @param sc Spark context used to parallelize the rows
    * @return one [[DenseVector]] per matrix row
    */
  def matrixToRDD(m: Matrix, sc: SparkContext): RDD[Vector] = {
    val columnMajor = m.toArray.grouped(m.numRows).toSeq
    val rowValues = columnMajor.transpose // skip the transpose for a column-major RDD
    sc.parallelize(rowValues.map(values => new DenseVector(values.toArray)))
  }

  /**
    * Entry point. Demonstrates:
    *   1. a hand-rolled Pearson chi-squared goodness-of-fit test on a pair of
    *      observed/expected vectors, and
    *   2. column summary statistics and a covariance matrix computed via a
    *      distributed [[RowMatrix]].
    */
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("CovarianceExample").setMaster("local[8]")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Sample input rows.
    val data = Array(
      Vectors.dense(4.0, 2.0, 3.0),
      Vectors.dense(5.0, 6.0, 1.0),
      Vectors.dense(2.0, 4.0, 7.0),
      Vectors.dense(3.0, 6.0, 5.0)
    )

    // Array[Vector] -> DataFrame (kept for illustration; not used below).
    val df: DataFrame = sqlContext.createDataFrame(data.map(Tuple1.apply)).toDF("features")

    // Observed and expected counts for the goodness-of-fit test.
    val obsArr = Array(1.3, 1.5, 1.5, 0.7, 1.1, 1.0, 0.8, 1.9, 1.7, 0.2)
    val expected: Vector = Vectors.dense(1.2, 1.9, 1.0, 0.3, 1.6, 1.4, 0.9, 1.1, 1.1, 0.5)
    val methodName: String = "pearson"
    val method = methodFromString(methodName)
    val size = obsArr.size

    // An empty expected vector means "assume a uniform distribution".
    // FIX: the original compared expected.size (an Int) against the Double
    // literal 0.0; compare against the Int literal 0 instead.
    val expArr = if (expected.size == 0) Array.tabulate(size)(_ => 1.0 / size) else expected.toArray
    val obsSum = obsArr.sum
    // The uniform expected distribution already sums to 1.0.
    val expSum = if (expected.size == 0) 1.0 else expArr.sum
    // Rescale expected counts when the two totals differ beyond tolerance.
    val scale = if (math.abs(obsSum - expSum) < 1e-7) 1.0 else obsSum / expSum
    // Accumulate the test statistic cell by cell.
    val statistic = obsArr.zip(expArr).foldLeft(0.0) { case (stat, (obs, exp)) =>
      if (scale == 1.0) {
        stat + method.chiSqFunc(obs, exp)
      } else {
        stat + method.chiSqFunc(obs, exp * scale)
      }
    }
    // Goodness-of-fit: degrees of freedom = number of categories - 1.
    // (Renamed from "df2", which read as a second DataFrame.)
    val degreesOfFreedom = obsArr.length - 1
    val pValue = 1.0 - new ChiSquaredDistribution(degreesOfFreedom).cumulativeProbability(statistic)
    println("// pValue")
    println(pValue)

    // Build a 10x2 column-major matrix whose two columns are the observed and
    // expected vectors, distribute it row-wise, and compute statistics.
    val dm: Matrix = Matrices.dense(10, 2, Array(1.3, 1.5, 1.5, 0.7, 1.1, 1.0, 0.8, 1.9, 1.7, 0.2, 1.2, 1.9, 1.0, 0.3, 1.6, 1.4, 0.9, 1.1, 1.1, 0.5))
    val rows: RDD[Vector] = matrixToRDD(dm, sc)
    val mat = new RowMatrix(rows)
    // Column summary statistics (typo "stasticSummary" fixed).
    val statSummary: MultivariateStatisticalSummary = mat.computeColumnSummaryStatistics()
    // Row count.
    println("// 计数")
    println(statSummary.count)
    // Column means.
    println("// 均值")
    println(statSummary.mean)
    // Column variances.
    println("// 方差")
    println(statSummary.variance)

    // Covariance matrix: entry (i, j) is cov(column_i, column_j). Note the
    // original trailing comments quoted results for 4x3 sample data, which
    // did not match this 10x2 matrix, so they were removed.
    println("// 协方差")
    val covariance: Matrix = mat.computeCovariance()
    println(covariance)

    // Release Spark resources before exiting.
    sc.stop()
  }

}
