package com.gis.bigdata.spark02.core.framework.service

import com.gis.bigdata.spark02.core.framework.common.TService
import com.gis.bigdata.spark02.core.framework.dao.WordCountDAO
import org.apache.spark.rdd.RDD

/**
 * Service layer: orchestrates the word-count business pipeline,
 * delegating file access to the DAO layer.
 *
 * @author LnnuUser
 * @create 2022-10-07 21:39
 */
class WordCountService extends TService {

  // Data-access object responsible for reading raw input files.
  private val wordCountDAO = new WordCountDAO()

  /**
   * Runs the word-count analysis over the given input file.
   *
   * Pipeline: read lines -> flatten into words -> pair each word with 1 ->
   * `reduceByKey` to sum counts per word -> collect results to the driver.
   *
   * `reduceByKey` is preferred over `groupBy` + `map` because it combines
   * per-partition (map-side) aggregation with the shuffle, moving far less
   * data than grouping all values first.
   *
   * @param path input file path; defaults to the original hard-coded
   *             location for backward compatibility with existing callers
   * @return array of (word, count) pairs collected on the driver
   */
  def dataAnalysis(path: String = "datas/1.txt"): Array[(String, Int)] = {
    // 1. Read the file, producing one line per record, e.g. "hello world".
    val lines: RDD[String] = wordCountDAO.readFile(path)

    // 2. Flatten each line into individual words, splitting on spaces.
    //    "hello world" ==> hello, world
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // 2.5 Pair every word with an initial count of 1: (hello, 1)
    val wordToOne: RDD[(String, Int)] = words.map(word => (word, 1))

    // 3/4. Sum the counts of identical words in a single grouped aggregation:
    //      (hello, 1), (hello, 1) ==> (hello, 2)
    val wordCount: RDD[(String, Int)] = wordToOne.reduceByKey(_ + _)

    // 5. Collect the final (word, count) pairs back to the driver.
    wordCount.collect()
  }

}
