package com.hu.wc

import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.streaming.api.scala._

/**
 * @Author: hujianjun
 * @Create Date: 2020/12/2 16:22
 * @Describe: Compute the average English score for each class
 */

/** One input record: a student's English score in a given class. */
case class ScoreLog(classId: String, name: String, score: Int)

/** Aggregation accumulator: running score sum and record count per class. */
case class ScoreLogResult(classId: String, scores: Int, userCnt: Int)

/**
 * Flink job: reads score records from a CSV resource, keys them by class,
 * and emits the average score of every 3 records per class.
 */
object ClassAvgScore {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // Single parallelism keeps the printed output in a deterministic order.
    env.setParallelism(1)

    // Parse each CSV line "classId,name,score" into a ScoreLog record.
    val scoreStream = env
      .readTextFile(getClass.getResource("/data/score.csv").getPath)
      .map { line =>
        val fields = line.split(",")
        ScoreLog(fields(0), fields(1), fields(2).toInt)
      }

    // Average per class over tumbling count windows of 3 records.
    scoreStream
      .keyBy(_.classId)
      .countWindow(3)
      .aggregate(new ScoreLogAgg())
      .print("avg score")

    env.execute("statistic class avg score job")
  }
}

/**
 * Incremental average: accumulates (sum, count) per class and emits
 * (classId, average rounded to 2 decimal places).
 *
 * Fixes over the original:
 *  - no `new` on case-class construction (consistent with `add`/`merge`);
 *  - numeric rounding instead of the deprecated `.formatted("%.2f").toDouble`
 *    string round-trip;
 *  - empty accumulator no longer produces NaN (0/0);
 *  - `merge` no longer drops the classId when the left side is a fresh
 *    (classId == null) accumulator.
 */
class ScoreLogAgg extends AggregateFunction[ScoreLog, ScoreLogResult, (String, Double)] {
  // classId is unknown until the first element arrives.
  override def createAccumulator(): ScoreLogResult = ScoreLogResult(null, 0, 0)

  override def add(value: ScoreLog, accumulator: ScoreLogResult): ScoreLogResult =
    ScoreLogResult(value.classId, accumulator.scores + value.score, accumulator.userCnt + 1)

  override def getResult(accumulator: ScoreLogResult): (String, Double) = {
    // Guard against an empty window so we never emit NaN.
    val avg =
      if (accumulator.userCnt == 0) 0.0
      else accumulator.scores.toDouble / accumulator.userCnt
    // Round half-up to 2 decimals numerically (scores are non-negative).
    (accumulator.classId, math.round(avg * 100) / 100.0)
  }

  override def merge(a: ScoreLogResult, b: ScoreLogResult): ScoreLogResult = {
    // Prefer whichever side already knows the key.
    val classId = if (a.classId != null) a.classId else b.classId
    ScoreLogResult(classId, a.scores + b.scores, a.userCnt + b.userCnt)
  }
}