package com.pxene.dmp.task;

import java.net.URI;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.springframework.batch.core.JobExecutionException;
import org.springframework.batch.core.StepContribution;
import org.springframework.batch.core.scope.context.ChunkContext;
import org.springframework.batch.repeat.RepeatStatus;

import com.pxene.dmp.common.DateUtils;
import com.pxene.dmp.constant.BaseConstant;
import com.pxene.dmp.constant.FileSuffixTime;
import com.pxene.dmp.main.JobManager;
import com.pxene.dmp.mapper.GetUserInfoMapper;
import com.pxene.dmp.reducer.GetUserInfoReducer;

/**
 * Spring Batch tasklet that runs the "getUserInfo" MapReduce job: it reads the
 * cleared user-info data for the current date-hour from HDFS, optionally
 * distributes the most recent previous result (up to 8 hours back) via the
 * distributed cache, and writes aggregated output as sequence files.
 */
public class GetUserInfoTask extends MapReduceTask {
	
	private static final Log log = LogFactory.getLog(GetUserInfoTask.class);
	
	/**
	 * Builds and runs the MapReduce job for the current date-hour.
	 *
	 * @param contribution step contribution (unused, required by the Tasklet contract)
	 * @param context      chunk context (unused, required by the Tasklet contract)
	 * @return {@link RepeatStatus#FINISHED} when the job completes successfully
	 * @throws JobExecutionException if the input path is missing or the MapReduce job fails
	 * @throws Exception             on any other Hadoop/job-setup error
	 */
	@Override
	public RepeatStatus execute(StepContribution contribution, ChunkContext context)
			throws Exception {
		String datehour = FileSuffixTime.getDatehour();
		
		String inputPath = BaseConstant.HDFS_TEMP_ROOT_DIR
				         + datehour + "/"
				   		 + BaseConstant.HDFS_TEMP_CLEARDATA_DIR
				   		 + BaseConstant.HDFS_TEMP_USERINFO_DIR;
		String outputPath = BaseConstant.HDFS_TEMP_ROOT_DIR
				          + datehour + "/"
		  		 		  + BaseConstant.HDFS_TEMP_GETDATA_DIR
		  		 		  + BaseConstant.HDFS_TEMP_USERINFO_DIR;
		
		if (!hadoopFs.exists(new Path(inputPath))) {
			throw new JobExecutionException(BaseConstant.LOG_PREFIX + inputPath + " not exists");
		}
		
		String lastPathTmpl = BaseConstant.HDFS_TEMP_ROOT_DIR
				          + "${datehour}" + "/"
				 		  + BaseConstant.HDFS_TEMP_GETDATA_DIR
				 		  + BaseConstant.HDFS_TEMP_USERINFO_DIR;
		// Path of the previous run's result, if one is found.
		String lastPath = null;
		// At 04:00 a fresh day's statistics start, so no previous result is reused then.
		if (!"04".equals(FileSuffixTime.getHour())) {
			// Look back up to 8 hours for an existing result; if none is found the job
			// recomputes from scratch.
			for (int i = 1; i <= 8; i++) {
				String lastDatehour = DateUtils.getFormat(DateUtils.addHours(DateUtils.getDate(datehour, "yyyyMMddHH"), -i), "yyyyMMddHH");
				// The 03:00 data has already been sent; stop looking once we reach it.
				if (lastDatehour.endsWith("03")) {
					log.info(BaseConstant.LOG_PREFIX + "last path is not existed");
					break;
				}
				lastPath = lastPathTmpl.replace("${datehour}", lastDatehour);
				if (hadoopFs.exists(new Path(lastPath))) {
					log.info(BaseConstant.LOG_PREFIX + "last path is " + lastPath);
					break;
				} else {
					lastPath = null;
				}
			}
		}
		
		Job job = Job.getInstance(configuration, "getUserInfo");
		// Distribute the previous result's files to the tasks via the distributed cache.
		if (lastPath != null) {
			// globStatus returns null when nothing matches the pattern — guard against NPE.
			FileStatus[] statuses = hadoopFs.globStatus(new Path(lastPath + "*"));
			if (statuses != null) {
				for (FileStatus status : statuses) {
					Path filePath = status.getPath();
					// Skip the _SUCCESS marker file; only real data files are cached.
					if (!filePath.toString().endsWith("_SUCCESS")) {
						// Path.toUri() avoids re-parsing the path string as a URI.
						job.addCacheFile(filePath.toUri());
					}
				}
			}
		}
		job.setJarByClass(JobManager.class);
		
		// map
		job.setMapperClass(GetUserInfoMapper.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(LongWritable.class);
		// reduce
		job.setReducerClass(GetUserInfoReducer.class);
		job.setOutputKeyClass(LongWritable.class);
		job.setOutputValueClass(Text.class);
		job.setNumReduceTasks(BaseConstant.REDUCE_NUMBER);
		
		job.setInputFormatClass(SequenceFileInputFormat.class);
		FileInputFormat.addInputPath(job, new Path(inputPath));
		job.setOutputFormatClass(SequenceFileOutputFormat.class);
		FileOutputFormat.setOutputPath(job, new Path(outputPath));
		
		// waitForCompletion returns false when the job fails; the original code ignored
		// this and reported the step as finished even on failure.
		if (!job.waitForCompletion(true)) {
			throw new JobExecutionException(BaseConstant.LOG_PREFIX + "getUserInfo job failed, output: " + outputPath);
		}
		
		return RepeatStatus.FINISHED;
	}
	
}