package com.mi.rdd

import org.apache.spark.sql.SparkSession


/**
 * Spark job: per-region medical-insurance fraud ratio.
 *
 * Reads `data/data.csv` (with header), where `RES` is a 0/1 fraud flag and
 * `地区` is the region name, then prints each region with the fraction of
 * fraudulent records, sorted by ratio descending.
 */
object area {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("MedicalInsurance").master("local").getOrCreate()

    val data = spark.read
      .option("header", "true")
      .csv("data/data.csv")

    // Build (region, fraudFlag) pairs. CSV columns arrive as strings, and a
    // dirty row (null or non-numeric RES, missing region) must not crash the
    // job — flatMap drops such rows instead of throwing.
    val areaCheatRDD = data
      .select("RES", "地区")
      .rdd
      .flatMap { row =>
        val flag   = Option(row.getString(0)).flatMap(s => scala.util.Try(s.trim.toInt).toOption)
        val region = Option(row.getString(1))
        for (f <- flag; r <- region) yield (r, f)
      }

    // 每个地区医保欺诈占比 (fraud ratio per region).
    // aggregateByKey keeps only a (sum, count) pair per key during the
    // shuffle; groupByKey would ship and buffer every record per key and can
    // OOM on skewed regions. count > 0 is guaranteed: a key only exists if at
    // least one record contributed to it.
    areaCheatRDD
      .aggregateByKey((0L, 0L))(
        { case ((sum, cnt), v)            => (sum + v, cnt + 1) },
        { case ((s1, c1), (s2, c2))       => (s1 + s2, c1 + c2) }
      )
      .mapValues { case (sum, cnt) => sum.toFloat / cnt }
      .sortBy(_._2, ascending = false)
      .foreach(println)

    spark.stop()
  }
}
