package com.jida.hadoop.mr.tools;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.curator.framework.recipes.leader.Participant;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.hamcrest.core.SubstringMatcher;

import com.sun.xml.internal.ws.api.server.Container;

import com.jida.hadoop.mr.tools.StringComparator;
import com.jida.hadoop.mr.tools.TProperties;



public class UVMapper extends Mapper<LongWritable, Text, Text, Text> {
	// Reusable Writable instances: Hadoop's recommended pattern to avoid
	// allocating a new object for every input record.
	private final Text okey = new Text();
	private final Text ovalue = new Text();

	/**
	 * Re-keys pre-aggregated records loaded from HDFS.
	 *
	 * Input lines are fields joined by the "fileoutsplit" delimiter, e.g.:
	 * <pre>
	 *   002229000000000000|513049037693|2
	 *   002229000000000000|513049037694|1
	 * </pre>
	 * Emits key = field[0] and value = field[1] + "outfilesplit" + field[2].
	 *
	 * @param key     byte offset of the line within the split (unused)
	 * @param value   one delimited input line
	 * @param context Hadoop context the (key, value) pair is written to
	 * @throws IOException          if the underlying write fails
	 * @throws InterruptedException if the task is interrupted
	 */
	@Override
	protected void map(LongWritable key, Text value,
			Mapper<LongWritable, Text, Text, Text>.Context context)
			throws IOException, InterruptedException {
		String[] fields = value.toString().split(TProperties.getValue("fileoutsplit"));
		// Skip malformed/short lines instead of failing the whole task
		// with an ArrayIndexOutOfBoundsException.
		if (fields.length < 3) {
			return;
		}
		// StringBuilder: no synchronization needed within a single map() call.
		StringBuilder sb = new StringBuilder();
		sb.append(fields[1]).append(TProperties.getValue("outfilesplit")).append(fields[2]);
		okey.set(fields[0]);
		ovalue.set(sb.toString());
		context.write(okey, ovalue);
	}
}