package process


import org.apache.hadoop.mapreduce.Mapper
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.Reducer
import java.lang.Iterable
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.io.IntWritable



class DegreeMapper 
  extends Mapper[LongWritable, Text, Text, IntWritable]:

  // Reused output key/value objects — standard Hadoop practice to avoid
  // allocating per record.
  private val t1 = new Text()
  private val t2 = new IntWritable(1)

  /** Emits (degree, 1) for each CSV input line, where the degree is the
    * field at index 5. Lines with fewer than 6 fields are skipped instead
    * of crashing the task with ArrayIndexOutOfBoundsException.
    */
  override def map(key: LongWritable, value: Text, 
              context: Mapper[LongWritable,Text,Text, IntWritable]#Context): Unit = 

    // limit = -1 keeps trailing empty fields, so column positions stay
    // stable even when later columns are blank.
    val splits = value.toString.split(",", -1)
    if splits.length > 5 then
      t1.set(splits(5))
      context.write(t1, t2)


class DegreeReducer
  extends Reducer[Text, IntWritable, Text, IntWritable]:

    /** Sums the per-record counts for a single degree key and writes the
      * total as an IntWritable.
      */
    override def reduce(key: Text, 
                      values: Iterable[IntWritable], 
                      context: Reducer[Text, IntWritable, Text, IntWritable]#Context): Unit = 
      // Accumulate with an explicit iterator over the Java Iterable.
      var total = 0
      val it = values.iterator()
      while it.hasNext do
        total += it.next().get
      context.write(key, new IntWritable(total))


object Degree:
  /** Launches the degree-distribution MapReduce job via JobHelper,
    * reading from JobHelper.DATA and writing to "/degree".
    * NOTE(review): JobHelper is defined elsewhere in the project —
    * its exact signature/return type is not visible here.
    */
  def run =
    val inputPaths = Array(JobHelper.DATA)
    val outputPath = "/degree"
    JobHelper.run(
      "学历分析", 
      Degree.getClass, 
      classOf[DegreeMapper], 
      classOf[DegreeReducer], 
      inputPaths,
      outputPath,
      mapValClass = classOf[IntWritable],
      outValClass = classOf[IntWritable]
    )
    
    



