package com.pxene.dmp.task;

import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.springframework.batch.core.JobExecutionException;
import org.springframework.batch.core.StepContribution;
import org.springframework.batch.core.scope.context.ChunkContext;
import org.springframework.batch.repeat.RepeatStatus;

import com.pxene.dmp.constant.BaseConstant;
import com.pxene.dmp.constant.DomainList;
import com.pxene.dmp.constant.FileSuffixTime;
import com.pxene.dmp.constant.InvalidList;
import com.pxene.dmp.constant.Province;
import com.pxene.dmp.constant.ApiList;
import com.pxene.dmp.main.JobManager;
import com.pxene.dmp.mapper.ClearDataMapper;

/**
 * Spring Batch task that launches the "clear data" MapReduce job: it globs the
 * raw 3G and 4G data files for the current hour across all provinces, feeds
 * them to {@link ClearDataMapper} (map-only, zero reducers) and writes the
 * cleaned records as sequence files into the temp directory.
 */
public class ClearDataTask extends MapReduceTask {
	private static final Log log = LogFactory.getLog(ClearDataTask.class);

	/**
	 * Builds, configures and runs the clear-data job.
	 *
	 * @param contribution step contribution (unused, required by the interface)
	 * @param context      chunk context (unused, required by the interface)
	 * @return {@link RepeatStatus#FINISHED} when the job completes successfully
	 * @throws JobExecutionException when no input files match, or the
	 *                               MapReduce job itself fails
	 */
	@Override
	public RepeatStatus execute(StepContribution contribution, ChunkContext context) 
			throws Exception {
		String datehour = FileSuffixTime.getDatehour();
		
		// The 3G and 4G inputs share the same path template; only the value
		// substituted for ${province} differs (plain name vs. name + "_4G").
		String inputPath = BaseConstant.HDFS_DATA_ROOT_DIR + "${province}/" 
						 + FileSuffixTime.getDate() + "/"
						 + BaseConstant.FILE_PREFIX 
						 + datehour + "*" 
						 + BaseConstant.FILE_SUFFIX;
		String outputPath = BaseConstant.HDFS_TEMP_ROOT_DIR 
						  + datehour + "/"
						  + BaseConstant.HDFS_TEMP_CLEARDATA_DIR;
		
		// Pass the filter lists to the mapper via the job configuration.
		List<String> domainList = DomainList.elements();
		configuration.setStrings("domains", domainList.toArray(new String[domainList.size()]));
		
		List<String> apiList = ApiList.elements();
		configuration.setStrings("apis", apiList.toArray(new String[apiList.size()]));
		
		List<String> invalidList = InvalidList.elements();
		configuration.setStrings("invalids", invalidList.toArray(new String[invalidList.size()]));
		
		Job job = Job.getInstance(configuration);
		job.setJarByClass(JobManager.class);
		// map
		job.setMapperClass(ClearDataMapper.class);
		job.setMapOutputKeyClass(LongWritable.class);
		job.setMapOutputValueClass(Text.class);
		// map-only job: the mapper's output is the final output
		job.setNumReduceTasks(0);
		
		job.setInputFormatClass(SequenceFileInputFormat.class);
		int fileCount = 0;
		long size = 0L;
		for (Province province : Province.values()) {
			// Each province contributes two input sets: 3G ("<name>") and 4G ("<name>_4G").
			for (String provinceDir : new String[] { province.getName(), province.getName() + "_4G" }) {
				FileStatus[] statuses = hadoopFs.globStatus(new Path(inputPath.replace("${province}", provinceDir)));
				if (statuses == null) {
					// globStatus may return null when nothing matches; the
					// fileCount == 0 check below reports the overall-empty case.
					continue;
				}
				for (FileStatus status : statuses) {
					String path = status.getPath().toString();
					log.info(BaseConstant.LOG_PREFIX + "data file is " + path);
					FileInputFormat.addInputPaths(job, path);
					fileCount ++;
					size += status.getLen();
				}
			}
		}
		if (fileCount == 0) {
			throw new JobExecutionException(BaseConstant.LOG_PREFIX + "Data file amount is zero");
		}
		log.info(BaseConstant.LOG_PREFIX + "data size is " + size);
		job.setOutputFormatClass(SequenceFileOutputFormat.class);
		FileOutputFormat.setOutputPath(job, new Path(outputPath));
		
		// Fail the step when the MapReduce job fails; the original ignored
		// this result and reported FINISHED even on job failure.
		if (!job.waitForCompletion(true)) {
			throw new JobExecutionException(BaseConstant.LOG_PREFIX + "ClearData MapReduce job failed");
		}
		
		return RepeatStatus.FINISHED;
	}
	
}
