package com.mi.rdd

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

import scala.util.Try


object hospital {

  /** Computes the average treatment cost per patient for each hospital
    * from `data/data.csv` and prints one `(hospitalCode, avgCost)` line
    * per hospital, sorted by average cost in descending order.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("MedicalInsurance").master("local").getOrCreate()

    // CSV is read with a header row; all columns arrive as strings.
    val data = spark.read
      .option("header", "true")
      .csv("data/data.csv")

    // Build (hospitalCode, (treatmentCost, 1)) pairs.
    // Rows whose code or cost cell is null, or whose cost is not numeric,
    // are skipped instead of crashing the whole job with an NPE /
    // NumberFormatException as the bare `.toFloat` did before.
    val treatmentCostsRDD = data
      .select("医院编码_NN", "治疗费发生金额_SUM")
      .rdd
      .flatMap { row =>
        for {
          code <- Option(row(0))
          // Double (not Float) to limit precision loss when summing amounts.
          cost <- Option(row(1)).flatMap(v => Try(v.toString.toDouble).toOption)
        } yield (code, (cost, 1))
      }

    // Average total treatment cost per person at each hospital:
    // sum the costs and counts per key, then divide.
    val avgCostPerHospital = treatmentCostsRDD
      .reduceByKey { case ((sumA, cntA), (sumB, cntB)) => (sumA + sumB, cntA + cntB) }
      .mapValues { case (sum, cnt) => sum / cnt }
      .sortBy(_._2, ascending = false)

    // collect() brings the sorted results to the driver before printing;
    // a bare rdd.foreach(println) runs on the executors, so on any
    // non-local master the output would be lost and/or unordered.
    avgCostPerHospital.collect().foreach(println)

    spark.stop()
  }
}
