package main

import org.apache.hadoop.io.IntWritable
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.Mapper
import org.apache.hadoop.mapreduce.Reducer
import java.io.IOException
import java.util.*

object WordCount {

  /**
   * Mapper stage of the word-count job: splits each input line into
   * whitespace-delimited tokens and emits `(token, 1)` for every token.
   *
   * NOTE(review): the superclass type arguments (`Any?`, `Text?`, `IntWritable?`)
   * are kept as-is to preserve the existing job configuration contract.
   */
  class StringTokenMapper : Mapper<Any?, Text, Text?, IntWritable?>() {
    // Reused across map() invocations so we don't allocate a Text per token.
    private val word = Text()

    @Throws(IOException::class, InterruptedException::class)
    override fun map(key: Any?, value: Text, context: Context) {
      // Tokenize on whitespace; strip '!' so "word!" and "word" are counted together.
      val tokenizer = StringTokenizer(value.toString())
      while (tokenizer.hasMoreTokens()) {
        // Fixed: the original chained two identical replace("!", "") calls;
        // the second was a no-op and has been removed.
        word.set(tokenizer.nextToken().replace("!", ""))
        context.write(word, ONE)
        // Debug trace of each emitted pair; consider a proper logger in production.
        println("output: <$word, $ONE>")
      }
    }

    companion object {
      // Constant count emitted for every token; shared to avoid per-call allocation.
      private val ONE = IntWritable(1)
    }
  }

  /**
   * Reducer stage of the word-count job: sums the per-token counts emitted by
   * [StringTokenMapper] and writes `(token, total)`.
   *
   * The nullable parameters mirror the original override; Hadoop never actually
   * passes null here, but the defensive handling is preserved.
   */
  class StringCountReducer : Reducer<Text, IntWritable, Text, IntWritable>() {
    // Reused across reduce() invocations to avoid allocating an IntWritable per key.
    private val result = IntWritable()

    override fun reduce(key: Text?, values: MutableIterable<IntWritable>?, context: Context?) {
      values?.let { counts ->
        // sumOf replaces the original fold, which shadowed the outer lambda's
        // implicit `it` with its own — an anti-pattern; behavior is identical.
        result.set(counts.sumOf { it.get() })
        context?.write(key, result)
      }
    }
  }
}