package com.sqk.task0708;


import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import com.sqk.dxpro.utils.TProperties;


//Output record: productID | userID | PV (page views) | productType
//     e.g. car000466|513049888252|1|car
//keyin:   userID plus productID (joined by the configured separator)
//valuein: [1,1,1,1......]
//keyout:  NullWritable
//valueout: userID|productID|PV(page views)|productType
/**
 * Reducer that totals the page views (PV) for each (user, product) key and
 * emits one line per key: userID|productID|PV|productType, with a
 * NullWritable key so only the value text appears in the output file.
 */
public class DxCountPvReducer extends Reducer<Text, IntWritable, NullWritable, Text> {

	// Reused across reduce() calls to avoid allocating a Text per record,
	// the standard Hadoop pattern for output writables.
	private final Text outValue = new Text();

	@Override
	protected void reduce(Text key, Iterable<IntWritable> values,
			Reducer<Text, IntWritable, NullWritable, Text>.Context context) throws IOException, InterruptedException {
		// Sum the per-record counts [1,1,1,...] for this key.
		int sum = 0;
		for (IntWritable value : values) {
			sum += value.get();
		}

		// NOTE(review): String.split takes a REGEX. If "fileoutsplit" is a regex
		// metacharacter such as "|" (the example output uses "|"), this split is
		// broken and should use Pattern.quote(...) — confirm the configured value.
		String[] keys = key.toString().split(TProperties.getValue("fileoutsplit"));

		// Look the output separator up once per call instead of three times.
		String sep = TProperties.getValue("outfilesplit");

		// Assumed key layout: keys[0]=productID, keys[1]=userID, keys[2]=productType
		// (inferred from the original output ordering) — TODO confirm against the mapper.
		String result = keys[1] + sep + keys[0] + sep + sum + sep + keys[2];
		outValue.set(result);
		context.write(NullWritable.get(), outValue);
	}

}
