import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Reducer that emits, for each key, the top {@value #TOP_N} {@link JobcityTopNFlowBean}
 * records according to the bean's natural ordering (its {@code compareTo}).
 *
 * <p>Output is the bean itself as the key with a {@link NullWritable} value.
 */
public class JobcityTopNReducer extends Reducer<Text, JobcityTopNFlowBean, JobcityTopNFlowBean, NullWritable> {

    /** Number of top records to emit per key. */
    private static final int TOP_N = 3;

    /**
     * Buffers all beans for the key, sorts them, and writes out the first
     * {@link #TOP_N} entries.
     *
     * @param key     the grouping key (unused beyond grouping)
     * @param values  beans sharing this key; the framework reuses one Writable
     *                instance, so each element must be deep-copied before buffering
     * @param context Hadoop context used to emit results
     * @throws IOException          on write failure
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void reduce(Text key, Iterable<JobcityTopNFlowBean> values,
                          Context context)
            throws IOException, InterruptedException {

        List<JobcityTopNFlowBean> beanList = new ArrayList<>();

        for (JobcityTopNFlowBean jobBean : values) {
            // Deep-copy: Hadoop reuses the same bean instance across iterations,
            // so storing the reference directly would leave N copies of the last value.
            JobcityTopNFlowBean newBean = new JobcityTopNFlowBean();
            newBean.set(jobBean.getJob_city(), jobBean.getJob_title(),
                    jobBean.getJob_sub_title(), jobBean.getMax_salary(),
                    jobBean.getMin_edu_level());
            beanList.add(newBean);
        }

        // Natural ordering defined by JobcityTopNFlowBean#compareTo.
        Collections.sort(beanList);

        // Emit only the first TOP_N beans; break early instead of scanning the rest.
        int emitted = 0;
        for (JobcityTopNFlowBean bean : beanList) {
            if (emitted >= TOP_N) {
                break;
            }
            context.write(bean, NullWritable.get());
            emitted++;
        }
    }
}
