package jobPerCity;

import Bean.JobPerCityBean;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class JobPerCityReducer extends Reducer<Text, JobPerCityBean, Text, NullWritable> {

    // Accumulates (city_jobType) -> occurrence count across ALL reduce() calls.
    // Emission is deferred to cleanup() so each entry is written exactly once.
    private final Map<String, Integer> cityJobCount = new HashMap<>();

    /**
     * Counts occurrences of each city/job-type combination.
     *
     * BUG FIX: the original sorted and wrote the accumulated map inside
     * reduce(). Hadoop invokes reduce() once per key, and the instance-level
     * map/list were never cleared, so every key after the first re-emitted
     * the entire (growing) result set — duplicated output records. Output
     * now happens once, in cleanup().
     */
    @Override
    public void reduce(Text key, Iterable<JobPerCityBean> values, Context context)
            throws IOException, InterruptedException {
        for (JobPerCityBean bean : values) {
            cityJobCount.merge(bean.getCityJobType(), 1, Integer::sum);
        }
    }

    /**
     * Runs once after all keys have been reduced: converts the accumulated
     * counts into beans, sorts them, and writes the final records.
     */
    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        List<JobPerCityBean> resultList = new ArrayList<>(cityJobCount.size());
        for (Map.Entry<String, Integer> entry : cityJobCount.entrySet()) {
            // Key format is assumed to be "city_jobType" with exactly one
            // underscore — NOTE(review): confirm job types never contain '_';
            // extra segments beyond parts[1] would be silently dropped.
            String[] parts = entry.getKey().split("_");
            JobPerCityBean bean = new JobPerCityBean(parts[0], parts[1]);
            bean.setCount(entry.getValue());
            resultList.add(bean);
        }

        JobPerCityBean.sort(resultList);

        // Reuse a single Text instance instead of allocating one per record.
        Text outKey = new Text();
        for (JobPerCityBean bean : resultList) {
            outKey.set(bean.toString());
            context.write(outKey, NullWritable.get());
        }
    }
}