package com.niit.lcy.towndistdensity;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * @Author: Chase
 * @Date: 2024/12/10
 */

public class TownDistDensityReducer extends Reducer<Text, Text, Text, Text> {
    // Reusable output Writable; Hadoop convention to avoid per-record allocation.
    private final Text outv = new Text();
    // Built once per task instead of once per map entry (it is loop-invariant).
    // Pattern "#.#####" keeps at most 5 fractional digits and drops trailing zeros.
    private final DecimalFormat densityFormat = new DecimalFormat("#.#####");

    /**
     * Computes, for one town, the distribution density of each community:
     * density = (occurrences of that community) / (total records for the town).
     *
     * <p>For every distinct community under {@code key}, emits one record of the
     * form {@code town \t [community, density]} where the density is formatted
     * with up to 5 decimal places.
     *
     * @param key     town name, e.g. "白蕉"
     * @param values  community names observed for that town, e.g. "香水鸿门"
     * @param context Hadoop context used to emit the results
     * @throws IOException          if the underlying write fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // All per-key state is local: if a write throws, nothing stale can
        // leak into the next reduce() call (the old instance fields relied on
        // manual clear() calls at the end of the method, which an exception
        // would have skipped).
        Map<String, Integer> communityCounts = new HashMap<>();
        long total = 0; // total community records for this town (integral counter)

        for (Text value : values) {
            communityCounts.merge(value.toString(), 1, Integer::sum);
            total++;
        }

        for (Map.Entry<String, Integer> entry : communityCounts.entrySet()) {
            String density = densityFormat.format(entry.getValue() / (double) total);
            // Reproduces the original List.toString() rendering: "[name, density]".
            outv.set("[" + entry.getKey() + ", " + density + "]");
            context.write(key, outv);
        }
    }
}
