package com.example.hadoop.mapreduce.kmeans;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.Iterator;
import java.util.Locale;

/**
 * Reduce phase of one k-means iteration: averages the 2-D coordinates of all
 * samples assigned to a cluster and emits the recomputed cluster center.
 *
 * <p>Input values are expected in the shape {@code "<label>,<x>,<y>"}; the
 * coordinates are read from fields 1 and 2. Output is
 * {@code key -> "-1,<cx>,<cy>"} (the leading {@code -1} marks a center record
 * rather than a sample — presumably consumed by the next iteration's mapper;
 * TODO confirm against the mapper side).
 */
public class KmeansReducer extends Reducer<IntWritable, Text, IntWritable, Text> {

    /**
     * Computes the mean of all valid sample coordinates for {@code key}.
     *
     * <p>Malformed records (fewer than 3 fields, non-numeric or NaN
     * coordinates) are skipped rather than failing the task. If no valid
     * sample remains, nothing is emitted — the original code would have
     * divided by zero and written a {@code NaN,NaN} center.
     *
     * @param key     cluster id
     * @param values  sample records of the form {@code "<label>,<x>,<y>"}
     * @param context used to emit the new center for this cluster
     */
    @Override
    protected void reduce(IntWritable key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        double sumX = 0.0;
        double sumY = 0.0;
        long nSamples = 0;
        for (Text value : values) {
            String[] fields = value.toString().trim().split(",");
            if (fields.length < 3) {
                continue; // malformed record: not enough fields
            }
            final double x;
            final double y;
            try {
                x = Double.parseDouble(fields[1]);
                y = Double.parseDouble(fields[2]);
            } catch (NumberFormatException ignored) {
                continue; // malformed record: non-numeric coordinate
            }
            if (Double.isNaN(x) || Double.isNaN(y)) {
                continue; // NaN would poison the running sums
            }
            sumX += x;
            sumY += y;
            nSamples++;
        }
        if (nSamples == 0) {
            return; // no valid samples for this cluster; avoid a NaN center
        }
        // Locale.ROOT forces '.' as the decimal separator so the emitted
        // record stays parseable by split(",") + Double.parseDouble downstream.
        context.write(key, new Text(
                String.format(Locale.ROOT, "-1,%f,%f", sumX / nSamples, sumY / nSamples)));
    }
}
