package com.km.algorithm

import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.linalg.{DenseVector, Vector, Vectors}
import org.apache.spark.mllib.stat.MultivariateStatisticalSummary
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
  * Created by lenovo on 2017/4/19.
  */
object FTest2 {

  /**
    * Transposes a row-major matrix: `result(i)(j) == xss(j)(i)`.
    *
    * Fix: the original unconditionally read `xss(0)` and therefore threw
    * `ArrayIndexOutOfBoundsException` on an empty input; an empty matrix now
    * transposes to an empty matrix.
    *
    * @param xss matrix as an array of equal-length rows (may be empty)
    * @return the transposed matrix; empty when xss has no rows or no columns
    */
  def transposeDouble(xss: Array[Array[Double]]): Array[Array[Double]] =
    if (xss.isEmpty || xss(0).isEmpty) Array.empty[Array[Double]]
    else Array.tabulate(xss(0).length, xss.length)((i, j) => xss(j)(i))

  def main(args: Array[String]) {

    // Path to the input CSV (numeric columns only, e.g. the abalone dataset).
    // Fix: the same literal was previously duplicated at the textFile call.
    val positiveFilePath = "C:\\Users\\lenovo\\Desktop\\abalone.csv"

    val sparkConf = new SparkConf().setAppName("TTest").setMaster("local[8]")
    val sc = new SparkContext(sparkConf)

    try {
      // Parse each CSV line into a dense vector of doubles.
      // NOTE(review): toDouble throws on non-numeric fields — assumes a clean,
      // header-less file; confirm against the actual input.
      val dm: RDD[Vector] = sc.textFile(positiveFilePath).map { line =>
        Vectors.dense(line.split(',').map(_.toDouble))
      }
      dm.cache() // reused twice below: driver-side transpose and RowMatrix stats

      // Bring the whole matrix to the driver and transpose it.
      // Fix: collect() replaces take(dm.count.toInt), which launched an extra
      // count job just to size the take. collect() assumes the data fits in
      // driver memory (fine for this demo-sized file).
      val ma: Array[Array[Double]] = dm.map(_.toArray).collect()
      val tma = transposeDouble(ma)
      // Fix: print(tma) only printed the array's object reference; report the
      // transposed dimensions instead.
      println("transposed: " + tma.length + " x " +
        (if (tma.isEmpty) 0 else tma(0).length))

      val mat = new RowMatrix(dm)
      // Get its size.
      val m = mat.numRows()
      val n = mat.numCols()
      println("m=" + m + "n=" + n)
      // The F statistic below divides the first two column variances.
      require(n >= 2, "F-test requires at least two columns, got " + n)

      // Column-wise summary statistics (count / mean / variance).
      val statisticSummary: MultivariateStatisticalSummary = mat.computeColumnSummaryStatistics()
      println("===============" + statisticSummary.variance)
      // count
      println("// 计数")
      println(statisticSummary.count)
      // mean
      println("// 均值")
      println(statisticSummary.mean)
      // variance
      println("// 方差")
      println(statisticSummary.variance)

      // F statistic: ratio of the first two column variances. Both samples
      // come from the same matrix, so they share degrees of freedom count - 1.
      val df = statisticSummary.count - 1
      val f = statisticSummary.variance(0) / statisticSummary.variance(1)
      println("// F value")
      println(f)

      // Right-tail probability of the F distribution (project helper FPvalue).
      println("// P value")
      val pf = FPvalue.probF(f, df, df)
      println(pf)
    } finally {
      // Fix: the SparkContext was never stopped in the original.
      sc.stop()
    }
  }
}
