package process


import org.apache.hadoop.mapreduce.Mapper
import org.apache.hadoop.mapreduce.Reducer
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.IntWritable
import org.apache.hadoop.io.Text
import java.lang.Iterable





class SkillMapper 
extends Mapper[LongWritable, Text, Text, IntWritable]:

  // Reused output key/value objects — standard Hadoop pattern to avoid
  // allocating a new writable per input record.
  private val skillKey = new Text()
  private val one = new IntWritable(1)

  /** Emits `(skill, 1)` for every ';'-separated skill found in column 7
    * (0-based) of a comma-separated input line.
    *
    * Rows with fewer than 8 comma-separated fields (e.g. a CSV header,
    * a blank line, or a truncated record) are skipped: indexing `splits(7)`
    * unconditionally would abort the task with
    * ArrayIndexOutOfBoundsException. Blank skill tokens (from inputs such
    * as `"a;;b"`) are also skipped so they don't pollute the counts.
    */
  override def map(key: LongWritable, value: Text, 
              context: Mapper[LongWritable,Text,Text, IntWritable]#Context): Unit = 

    val fields = value.toString.split(",")
    if fields.length > 7 then
      for skill <- fields(7).split(";") if skill.nonEmpty do
        skillKey.set(skill)
        context.write(skillKey, one)


class SkillReducer 
  extends Reducer[Text, IntWritable, Text, IntWritable]:

    /** Sums the per-occurrence counts emitted by the mapper for a single
      * skill key and writes the `(skill, total)` pair to the job output.
      */
    override def reduce(key: Text, 
                      values: Iterable[IntWritable], 
                      context: Reducer[Text, IntWritable, Text, IntWritable]#Context): Unit = 
      var total = 0
      // java.lang.Iterable: walk it with an explicit iterator.
      val it = values.iterator()
      while it.hasNext do
        total += it.next().get
      context.write(key, new IntWritable(total))


object Skill:
  /** Configures and launches the skill word-count MapReduce job via the
    * project's JobHelper, reading from JobHelper.DATA and writing to
    * "/skill". Job name is the user-facing (Chinese) label for the run.
    */
  def run =
    val inputPaths = Array(JobHelper.DATA)
    val outputPath = "/skill"
    JobHelper.run(
      "工作技能词云图",
      Skill.getClass,
      classOf[SkillMapper],
      classOf[SkillReducer],
      inputPaths,
      outputPath,
      mapValClass = classOf[IntWritable],
      outValClass = classOf[IntWritable]
    )
  





