package com.sqk.dxpro.dxsta;

import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import com.sqk.dxpro.utils.TProperties;

/**
 * Second-stage statistics reducer: for each key, computes UV (count of
 * distinct first-field values among the incoming records) and PV (sum of
 * the second-field integer counts), then emits one line per key in the form
 * {@code key<outfilesplit>uv<outfilesplit>pv} with a null output key.
 */
public class DxStaTwoReducer extends Reducer<Text, Text, NullWritable, Text>{

	/**
	 * Aggregates UV and PV for a single key.
	 *
	 * @param key     the grouping key produced by the mapper
	 * @param values  records of the form {@code field0<fileoutsplit>field1},
	 *                where field0 identifies a visitor and field1 is an
	 *                integer count — presumably; confirm against the mapper
	 * @param context Hadoop context used to emit the result line
	 * @throws NumberFormatException if a record's second field is not an int
	 */
	@Override
	protected void reduce(Text key, Iterable<Text> values, Reducer<Text, Text, NullWritable, Text>.Context context)
			throws IOException, InterruptedException {

		// Hoist the separator lookups out of the loop — the original called
		// TProperties.getValue() (and split()) twice per record.
		String fieldSplit = TProperties.getValue("fileoutsplit");

		// FIX: the original tracked seen ids by concatenating them into one
		// String and calling contains(); that can falsely match an id that
		// spans the boundary between two earlier ids (e.g. "ab"+"cd" contains
		// "bc"), undercounting UV. A HashSet gives exact distinct counting.
		Set<String> seen = new HashSet<String>();
		int pvsum = 0;
		for (Text value : values) {
			String[] fields = value.toString().split(fieldSplit);
			seen.add(fields[0]);
			pvsum += Integer.parseInt(fields[1]);
		}
		int uvsum = seen.size();

		String outSplit = TProperties.getValue("outfilesplit");
		String line = key.toString() + outSplit + uvsum + outSplit + pvsum;
		context.write(NullWritable.get(), new Text(line));
	}

}
