package process


import org.apache.hadoop.mapreduce.Mapper
import org.apache.hadoop.mapreduce.Reducer
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.IntWritable
import org.apache.hadoop.io.Text
import java.lang.Iterable






/** Mapper: emits one (position, salary) pair per well-formed CSV input line.
  *
  * Expected comma-separated record layout: field 1 = salary (long),
  * field 3 = position. Malformed records (too few fields, or a salary that
  * is not a valid long — e.g. a header row or a blank line) are skipped
  * instead of failing the whole task attempt.
  */
class SalaryMapper 
extends Mapper[LongWritable, Text, Text, LongWritable]:

  // Writables are reused across map() calls to avoid per-record allocation
  // (standard Hadoop idiom; the framework copies values on write).
  private val t1 = new Text()
  private val t2 = new LongWritable(1)

  override def map(key: LongWritable, value: Text, 
              context: Mapper[LongWritable,Text,Text, LongWritable]#Context): Unit = 
    val splits = value.toString.split(",")
    // Guard: need at least 4 fields to read salary (1) and position (3).
    if splits.length > 3 then
      splits(1).trim.toLongOption match
        case Some(salary) =>
          t1.set(splits(3))
          t2.set(salary)
          context.write(t1, t2)
        case None =>
          () // salary field not numeric — skip this record




/** Reducer: writes the (truncating, integer) average of all salaries
  * observed for each position key.
  *
  * Hadoop invokes reduce() only for keys with at least one value, so the
  * count is never zero here.
  */
class SalaryReducer 
  extends Reducer[Text, LongWritable, Text, LongWritable]:

    override def reduce(key: Text, 
                      values: Iterable[LongWritable], 
                      context: Reducer[Text, LongWritable, Text, LongWritable]#Context): Unit = 
      // Single pass over the values: accumulate the running sum and count.
      var sum = 0L
      var n = 0L
      val it = values.iterator()
      while it.hasNext do
        sum += it.next().get
        n += 1
      // Integer division — fractional part of the average is discarded.
      context.write(key, new LongWritable(sum / n))


/** Job entry point: computes the average salary per position
  * ("不同岗位的平均薪资") over the shared data set, writing to /salary.
  */
object Salary:

  def run =
    // Input paths come from the project-wide data location.
    val inputPaths = Array(JobHelper.DATA)
    JobHelper.run(
      "不同岗位的平均薪资",
      Salary.getClass,
      classOf[SalaryMapper],
      classOf[SalaryReducer],
      inputPaths,
      "/salary",
      mapValClass = classOf[LongWritable],
      outValClass = classOf[LongWritable]
    )
  









