package com.bw.demo.dbiao;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
/*
 * Determine which input file each record comes from, tag it accordingly,
 * and emit it keyed by sid.
 */
public class DbiaoMapper extends Mapper<LongWritable, Text, Text, DbiaoBean> {
	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		// Each input line is a comma-separated record.
		String heng = value.toString();
		String[] split = heng.split(",");
		System.out.println("map===================================");
		/*
		 * Get the name of the file this record belongs to:
		 * first fetch the input split handled by this map task,
		 * then read the name of the file backing that split.
		 */
		FileSplit inputSplit = (FileSplit) context.getInputSplit();
		String pathname = inputSplit.getPath().getName();
		String sid = null;
		DbiaoBean dbiaoBean = null;
		if (pathname.contains("dingdan")) {
			// Line from an order ("dingdan") file: the join key sid is the third column.
			sid = split[2];
			dbiaoBean = new DbiaoBean(split[0], split[1], sid, Integer.valueOf(split[3]), "", "", 0.0f, "dingdan");
		} else {
			// Line from the other file (tagged "sp"): the join key sid is the first column.
			sid = split[0];
			dbiaoBean = new DbiaoBean("", "", sid, 0, split[1], split[2], Float.valueOf(split[3]), "sp");
		}
		// Emit keyed by sid so matching records from both files reach the same reducer.
		context.write(new Text(sid), dbiaoBean);
	}
}
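/*
 * A rough usage sketch with hypothetical sample data (the column layout and the
 * DbiaoBean field order are assumptions inferred from the constructor calls above):
 *
 *   line from a dingdan file:  "o1001,2020-01-01,p01,2"
 *     -> key "p01", value DbiaoBean("o1001", "2020-01-01", "p01", 2, "", "", 0.0f, "dingdan")
 *
 *   line from an sp file:      "p01,apple,fruit,3.5"
 *     -> key "p01", value DbiaoBean("", "", "p01", 0, "apple", "fruit", 3.5f, "sp")
 *
 * Because both records share the key "p01", they arrive at the same reducer,
 * which can then merge the two sides (the usual reduce-side join pattern).
 */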
