package Job

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

object OnlyFemaleInPer {

  /**
   * Spark job: for job postings at private companies ("民营/私营", CSV column 16),
   * computes the fraction whose gender column (column 7) contains "女" (female),
   * and prints the ratio to stdout.
   *
   * Input: CSV lines under `inputPath`; columns are comma-separated and the job
   * only reads columns 7 (gender) and 16 (company type).
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // NOTE(review): app name "WithEndSalArea" looks copy-pasted from another job;
    // kept as-is to avoid changing observable runtime strings.
    val sc: SparkContext = new SparkContext("local[*]", "WithEndSalArea")
    val inputPath: String = "file/JobData/input"
    // NOTE(review): outputPath is declared but never used — results go to stdout only.
    val outputPath: String = "file/JobData/output/WithEndSalArea"

    try {
      val infoLines: RDD[String] = sc.textFile(inputPath).repartition(2)

      // Split each line exactly once (the original re-split per predicate), and
      // drop malformed rows that are too short to index column 16 safely.
      // Cached because two separate actions below consume this RDD.
      val fields: RDD[Array[String]] = infoLines
        .map(_.split(","))
        .filter(_.length > 16)
        .cache()

      // Count of all postings at private companies, keyed on a dummy key ""
      // so the two counts can be joined below.
      val privateTotal = fields
        .filter(f => f(16).contains("民营/私营"))
        .map(_ => ("", 1))
        .reduceByKey(_ + _)

      // Count of female postings at private companies, same dummy key.
      val privateFemale = fields
        .filter(f => f(7).contains("女") && f(16).contains("民营/私营"))
        .map(_ => ("", 1))
        .reduceByKey(_ + _)

      // Join the two single-key RDDs and print female / total as a Double.
      privateTotal.join(privateFemale)
        .map { case (_, (total, female)) => female.toDouble / total }
        .foreach(println)
    } finally {
      // Always release the SparkContext — the original leaked it.
      sc.stop()
    }
  }

}
