package cn.tedu.batch

import org.apache.flink.api.common.cache.DistributedCache
import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.configuration.Configuration

import java.io.File
import scala.io.Source

/**
 * @author Amos
 * @date 2022/5/22
 */

object BatchDistributeCacheDemo {

  /**
   * Demonstrates Flink's distributed cache in a batch job: a student file on
   * HDFS (lines of "id,name") is registered as a cached file, shipped to every
   * TaskManager, and used to enrich an in-memory score dataset by replacing
   * each student id with the student's name.
   */
  def main(args: Array[String]): Unit = {
    // Build the batch execution environment.
    val env = ExecutionEnvironment.getExecutionEnvironment

    // Register the HDFS student file under the distributed-cache name "student".
    env.registerCachedFile("hdfs://hadoop01:8020/test/input/distribute_cache_student","student")

    // Score data source: tuples of (studentId, subject, score).
    import org.apache.flink.api.scala._
    val scoreSource: DataSet[(Int, String, Int)] = env.fromCollection(List((1, "语文", 50), (2, "数学", 70), (3, "英文", 86)))

    // (1, "语文", 50) => ("张三", "语文", 50): swap the id for the student name.
    val result = scoreSource.map(new RichMapFunction[(Int, String, Int), (String, String, Int)] {

      // Student lookup table (id -> name), built once per task in open().
      // Initialized to an empty map instead of null to avoid any NPE risk.
      var stuMap: Map[Int, String] = Map.empty

      override def open(parameters: Configuration): Unit = {
        // Locate this TaskManager's local copy of the cached file.
        val stuFile: File = getRuntimeContext.getDistributedCache.getFile("student")
        // Read and parse the file; close the BufferedSource in a finally
        // block so the file handle is not leaked (the original never closed it).
        val source = Source.fromFile(stuFile)
        try {
          stuMap = source.getLines().map { line =>
            val fields = line.split(",")
            (fields(0).toInt, fields(1))
          }.toMap
        } finally {
          source.close()
        }
      }

      override def map(value: (Int, String, Int)): (String, String, Int) = {
        // Unknown ids fall back to an empty name rather than throwing.
        (stuMap.getOrElse(value._1, ""), value._2, value._3)
      }
    })

    result.print()
  }

}
