package com.km.algorithm

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.Statistics

/**
  * Created by lenovo on 2017/4/17.
  */
/**
  * Demo driver: runs an MLlib chi-squared goodness-of-fit test, then two
  * small RDD examples (groupBy parity bucketing and word-count via
  * reduceByKey vs. groupByKey) on a local SparkContext.
  */
object Test1 {

  def main(args: Array[String]): Unit = {
    val A = Array(1.3, 1.5, 1.5, 0.7, 1.1, 1.0, 0.8, 1.9, 1.7, 0.2)
    println(A.getClass)

    // Observed counts and the expected distribution for the chi-squared test.
    val observed = Vectors.dense(1.3, 1.5, 1.5, 0.7, 1.1, 1.0, 0.8, 1.9, 1.7, 0.2)
    val expected = Vectors.dense(1.2, 1.9, 1.0, 0.3, 1.6, 1.4, 0.9, 1.1, 1.1, 0.5)

    // Pearson's chi-squared goodness-of-fit test of `observed` against `expected`.
    val chiSqResult = Statistics.chiSqTest(observed, expected)
    println(chiSqResult)

    val sparkConf = new SparkConf().setAppName("TTest").setMaster("local[8]")
    val sc = new SparkContext(sparkConf)
    try {
      // groupBy demo: bucket 1..9 (3 partitions) into "even"/"odd" groups.
      val parityGroups = sc.parallelize(1 to 9, 3)
        .groupBy(x => if (x % 2 == 0) "even" else "odd")
        .collect()
      parityGroups.foreach(println)

      val words = Array("one", "two", "two", "three", "three", "three")
      val wordPairsRDD = sc.parallelize(words).map(word => (word, 1))

      // Word count via reduceByKey: combines values per-partition before the
      // shuffle, so it moves less data than groupByKey.
      val wordCountsWithReduce = wordPairsRDD.reduceByKey(_ + _)
      // FIX: println(rdd) only printed the RDD's toString; collect the
      // results to the driver and print each (word, count) pair.
      wordCountsWithReduce.collect().foreach(println)

      // Same count via groupByKey + sum: shuffles every value, then sums
      // on the reducer side — equivalent result, less efficient.
      val wordCountsWithGroup = wordPairsRDD.groupByKey().map(t => (t._1, t._2.sum))
      wordCountsWithGroup.collect().foreach(println)
    } finally {
      // FIX: the SparkContext was never stopped — release its resources
      // even if one of the jobs above throws.
      sc.stop()
    }
  }

}
