package RDD_homework
import org.apache.spark.HashPartitioner
import org.apache.spark.{SparkConf, SparkContext}

object six_work {
  // All "first_*" exercises read CSV rows of the form "student,course,score".

  /** Entry point: builds a local SparkContext and runs the selected exercise. */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("work6")
    val sc = new SparkContext(sparkConf)
    try {
//      first_1(sc)
//      first_2(sc)
//      first_3(sc)
//      first_4(sc)
//      first_5(sc)
//      first_6(sc)
//      first_7(sc)
//      second(sc)
      third(sc)
    } finally {
      sc.stop() // fix: the SparkContext was never stopped (resource leak)
    }
  }

  /** Exercise 1-1: print the number of distinct students (first CSV field). */
  def first_1(sc: SparkContext): Unit = {
    val lines = sc.textFile("D:\\data\\chapter5-data1.txt")
    val students = lines.map(_.split(",")(0)).distinct()
    println(students.count())
  }

  /** Exercise 1-2: print the number of distinct courses (second CSV field). */
  def first_2(sc: SparkContext): Unit = {
    val lines = sc.textFile("D:\\data\\chapter5-data1.txt")
    val courses = lines.map(_.split(",")(1)).distinct()
    println(courses.count())
  }

  /** Exercise 1-3: print Tom's rows, then his average score.
    *
    * Fix: the original divided two `Int`s (`sum / count`), truncating the
    * average; the sum is now widened to `Double` before dividing.
    */
  def first_3(sc: SparkContext): Unit = {
    val lines = sc.textFile("D:\\data\\chapter5-data1.txt")
    val tomRows = lines.filter(_.split(",")(0) == "Tom")
    tomRows.foreach(println)
    val res = tomRows
      .map { row => val f = row.split(","); (f(0), f(2).toInt) } // split once per row
      .mapValues(score => (score, 1))
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))          // (sum, count) per student
      .mapValues { case (sum, n) => sum.toDouble / n }            // true (non-truncated) average
      .collect()
      .mkString(",")
    println(res)
  }

  /** Exercise 1-4: print every (student, course) pair, then the number of
    * courses each student is enrolled in.
    */
  def first_4(sc: SparkContext): Unit = {
    val lines = sc.textFile("D:\\data\\chapter5-data1.txt")
    val pairs = lines.map { row => val f = row.split(","); (f(0), f(1)) }
    pairs.foreach(println)
    pairs.mapValues(_ => 1).reduceByKey(_ + _).foreach(println)
  }

  /** Exercise 1-5: print how many enrollments the "DataBase" course has. */
  def first_5(sc: SparkContext): Unit = {
    val lines = sc.textFile("D:\\data\\chapter5-data1.txt")
    println(lines.filter(_.split(",")(1) == "DataBase").count())
  }

  /** Exercise 1-6: print the average score of every course.
    *
    * Fix: same integer-division truncation as in `first_3`.
    */
  def first_6(sc: SparkContext): Unit = {
    val lines = sc.textFile("D:\\data\\chapter5-data1.txt")
    val courseScores = lines.map { row => val f = row.split(","); (f(1), f(2).toInt) }
    val res = courseScores
      .mapValues(score => (score, 1))
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))          // (sum, count) per course
      .mapValues { case (sum, n) => sum.toDouble / n }            // true (non-truncated) average
      .collect()
      .mkString(",")
    println(res)
  }

  /** Exercise 1-7: count "DataBase" enrollments with a long accumulator.
    * (Accumulator practice — `first_5` computes the same number with `count()`.)
    */
  def first_7(sc: SparkContext): Unit = {
    val lines = sc.textFile("D:\\data\\chapter5-data1.txt")
    val ones = lines
      .filter(_.split(",")(1) == "DataBase")
      .map(row => (row.split(",")(1), 1))
    val accum = sc.longAccumulator("My Accumulator")
    ones.values.foreach(x => accum.add(x))
    println(accum.value)
  }

  /** Exercise 2: merge two files, drop blank lines and duplicates, and write
    * the remaining lines sorted into a single output file.
    */
  def second(sc: SparkContext): Unit = {
    val dataFile = "D:\\data\\data1\\dataA.txt,D:\\data\\data1\\dataB.txt"
    val data = sc.textFile(dataFile, 2)
    val res = data
      .filter(_.trim().length > 0)
      .map(line => (line.trim, ""))        // key on the trimmed line
      .partitionBy(new HashPartitioner(1)) // single partition -> one output file
      .groupByKey()                        // collapses duplicate lines
      .sortByKey()
      .keys
    res.saveAsTextFile("D:\\data\\data1\\result1")
  }

  /** Exercise 3: merge three score files ("name score" per line) and write
    * each student's average rounded to two decimals.
    *
    * Fix: the original rounded via `f"$avg%1.2f".toDouble`, which throws
    * NumberFormatException on JVM default locales whose decimal separator is
    * not '.' (e.g. "3,50".toDouble). BigDecimal HALF_UP rounding matches the
    * `%f` formatter's rounding and is locale-independent.
    */
  def third(sc: SparkContext): Unit = {
    val dataFile = "D:\\data\\data2\\gread_Algorithm.txt,D:\\data\\data2\\gread_Database.txt,E:\\scalawork\\data\\data2\\gread_Python.txt"
    val data = sc.textFile(dataFile, 3)

    val res = data
      .filter(_.trim().length > 0)
      .map { line => val f = line.split(" "); (f(0).trim(), f(1).trim().toInt) }
      .partitionBy(new HashPartitioner(1)) // single partition -> one output file
      .groupByKey()
      .map { case (name, scores) =>
        val avg = scores.sum.toDouble / scores.size
        val rounded = BigDecimal(avg).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
        (name, rounded)
      }
    res.saveAsTextFile("D:\\data\\data2\\result2")
  }

}
