package com.zc.hadoop.lzmh.log;

import java.io.IOException;
import java.util.TreeSet;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * MapReduce job that aggregates invocation-log statistics
 * (per-method call counts and timing averages/trimmed means).
 */
public class LogTotalMapReduce extends Configured implements Tool {

	/**
	 * Mapper: parses one log line and emits (method name, LogWritable(count=1, time)).
	 *
	 * <p>Expected line shape (see sample in {@code map}): the method name sits
	 * between the last "=" before the token "end" inside the "traceSeq..." segment,
	 * and the call duration is the integer between the line's last "=" and the
	 * trailing ">". Malformed lines are skipped instead of failing the task.
	 */
	public static class LogTotalMapper extends Mapper<LongWritable, Text, Text, LogWritable> {
		// Reused output objects — Hadoop idiom to avoid allocating per record.
		protected Text mapOutputKey = new Text();
		protected LogWritable mapOutputVlaue = new LogWritable();

		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			// Sample:
			// info5033.log.1:2017-09-15 15:05:34,206 INFO [com.lz.api.servlet.EqConfigServlet] - <traceId=851b7aff595f49d8aae49458ee7de126 traceSeq=1  method=getEqVersion end use_time=19>
			String lineValue = value.toString();

			// Locate all markers up front and bail out on malformed lines, instead of
			// letting substring() throw StringIndexOutOfBoundsException and kill the task.
			int traceSeqIdx = lineValue.lastIndexOf("traceSeq");
			int lastEqIdx = lineValue.lastIndexOf("=");
			int closeIdx = lineValue.lastIndexOf(">");
			if (traceSeqIdx < 0 || lastEqIdx <= traceSeqIdx || closeIdx <= lastEqIdx) {
				return;
			}

			// Segment from "traceSeq" up to the last "=": contains "method=<name> end ...".
			String tmpString = lineValue.substring(traceSeqIdx, lastEqIdx);
			int methodEqIdx = tmpString.lastIndexOf("=");
			int endTokenIdx = tmpString.lastIndexOf("end");
			if (methodEqIdx < 0 || endTokenIdx <= methodEqIdx) {
				return;
			}

			String method = tmpString.substring(methodEqIdx + 1, endTokenIdx);
			String time = lineValue.substring(lastEqIdx + 1, closeIdx);

			if (StringUtils.isNotBlank(method) && StringUtils.isNotBlank(time)) {
				int timeValue;
				try {
					timeValue = Integer.parseInt(time.trim());
				} catch (NumberFormatException e) {
					// Non-numeric duration: skip this record rather than failing the job.
					return;
				}
				mapOutputKey.set(method.trim());
				mapOutputVlaue.setCount(1);
				mapOutputVlaue.setTimeValue(timeValue);
				context.write(mapOutputKey, mapOutputVlaue);
			}
		}

	}

	/**
	 * Reducer: aggregates per-method call statistics.
	 *
	 * <p>The output key {@link LogTotalWritable} already carries both the method
	 * name and every computed statistic, so the output value slot uses
	 * {@link NullWritable} purely as a placeholder.
	 */
	public static class LogTotalReduce extends Reducer<Text, LogWritable, LogTotalWritable, NullWritable> {
		
		// Accumulated across all reduce() calls and written out in cleanup(), so the
		// final records are emitted in LogTotalWritable's sort order (a TreeSet keeps
		// its elements ordered by compareTo, or by a Comparator given at construction).
		// NOTE(review): a TreeSet discards an element that compares equal to one it
		// already holds — confirm LogTotalWritable.compareTo never returns 0 for two
		// different methods, otherwise records are silently dropped.
		private TreeSet<LogTotalWritable> resultSet = new TreeSet<LogTotalWritable>();
		
		@Override
		protected void setup(Reducer<Text, LogWritable, LogTotalWritable, NullWritable>.Context context)
				throws IOException, InterruptedException {
			super.setup(context);
		}
		
		/**
		 * Computes, for one method name, the call count, total/min/max time and
		 * three trimmed means: over the middle 50%, 90% and 99% of the sorted times
		 * (-1.0 marks a trimmed mean that could not be computed).
		 */
		@Override
		protected void reduce(Text key, Iterable<LogWritable> values, Context context)
				throws IOException, InterruptedException {
			
			// Sorted durations for this key.
			// NOTE(review): TreeSet deduplicates equal elements — presumably MyInt's
			// compareTo breaks ties so that duplicate durations survive (a plain
			// Integer would be collapsed); confirm, otherwise the trimmed means below
			// iterate fewer elements than 'count' and are skewed.
			TreeSet<MyInt> topSet = new TreeSet<MyInt>();
			
			// Running totals.
			int sum = 0;
			int count = 0;
			// Plain mean over all values.
			double aveValue = 0;
			// Trimmed mean over the middle 50% of values (-1 = not computed).
			double halfAveValue = -1.0;
			// Trimmed mean over the middle 90% of values (-1 = not computed).
			double ninetyAveValue = -1.0;
			// Trimmed mean over the middle 99% of values (-1 = not computed).
			double ninetyNineAveValue = -1.0;
			// Maximum duration (-1 = none seen).
			int maxValue = -1;
			// Minimum duration (-1 = none seen).
			int minValue = -1;
			
			// Single pass over the value objects (Hadoop reuses them, hence the
			// defensive "new MyInt(...)" when stashing a duration).
			for (LogWritable logValue : values) {
				sum += logValue.getTimeValue();
				count += logValue.getCount();
				
				if (maxValue < logValue.getTimeValue()) {
					maxValue = logValue.getTimeValue();
				}
				
				if (minValue > logValue.getTimeValue() || minValue < 0) {
					minValue = logValue.getTimeValue();
				}
				
				topSet.add(new MyInt(logValue.getTimeValue()));
			}
			
			// Debug trace; lands in the reduce task's stdout log.
			System.out.println(key.toString() + " count=" + count);
			
			// Output record carrying the method name and all statistics.
			LogTotalWritable logTotalWritable = new LogTotalWritable(key.toString(), count);
			
			// Elements trimmed from EACH end to keep the middle 50% (count * 0.25).
			int halfAveCount = (int) (count * 0.5 * 0.5);
			
			// Elements trimmed from EACH end to keep the middle 90% (count * 0.05).
			int ninetyAveCount = (int) (count * 0.1 * 0.5);
			
			// Elements trimmed from EACH end to keep the middle 99% (count * 0.005).
			int ninetyNineAveCount = (int) (count * 0.01 * 0.5);
			
			// Running sums for each trimmed window.
			double halfVal = 0;
			double ninetyVal = 0;
			double ninetyNineVal = 0;
			
			if (halfAveCount > 0 || ninetyAveCount > 0 || ninetyNineAveCount > 0) {
				int i = 1;
				int count99 = 0;
				int count90 = 0;
				int count50 = 0;
				
				
				// Walk the sorted durations once (1-based index i), accumulating each
				// window: elements with trim < i <= count - trim.
				for (MyInt logValue : topSet) {
					int timeValue = logValue.getValue();
					
					if (ninetyNineAveCount > 0 && i > ninetyNineAveCount && i <= (count - ninetyNineAveCount)) {
						ninetyNineVal += timeValue;
						count99 ++;
					}
					
					if (ninetyAveCount > 0 && i > ninetyAveCount && i <= (count - ninetyAveCount)) {
						ninetyVal += timeValue;
						count90 ++;
					}
					
					if (halfAveCount > 0 && i > halfAveCount  && i <= (count - halfAveCount)) {
						halfVal += timeValue;
						count50 ++;
					}
					
					i ++;
				}
				
				if (count99 > 0) {
					ninetyNineAveValue = 1.0 * ninetyNineVal / count99;
				}
				
				if (count90 > 0) {
					ninetyAveValue = 1.0 * ninetyVal / count90;
				}
				
				if (count50 > 0) {
					halfAveValue = 1.0 * halfVal / count50;
				}
				
			}
			
			// NOTE(review): reduce() is only invoked with at least one value, so
			// count should be > 0 here; if getCount() could ever return 0 this
			// division yields NaN — confirm against LogWritable's contract.
			aveValue = 1.0 * sum / count;
			
			logTotalWritable.setSum(sum);
			logTotalWritable.setAveValue(aveValue);
			logTotalWritable.setMaxValue(maxValue);
			logTotalWritable.setMinValue(minValue);
			logTotalWritable.setHalfAveValue(halfAveValue);
			logTotalWritable.setNinetyAveValue(ninetyAveValue);
			logTotalWritable.setNinetyNineAveValue(ninetyNineAveValue);
			
			// Buffer the record; emission happens sorted in cleanup().
			resultSet.add(logTotalWritable);
		}
		
		@Override
		protected void cleanup(Context context) throws IOException, InterruptedException {
			super.cleanup(context);
			
			// Emit every buffered record in the TreeSet's sorted order.
			for (LogTotalWritable logTotalWritable : resultSet) {
				context.write(logTotalWritable, NullWritable.get());
			}
		}

	}

	/**
	 * Configures and submits the statistics job.
	 *
	 * @param args input path(s) followed by the output path
	 * @return 0 on success, 1 on failure
	 * @throws Exception if job setup or execution fails
	 */
	@Override
	public int run(String[] args) throws Exception {

		// Use the Configuration injected by ToolRunner (via Configured) so that
		// generic command-line options such as "-D key=value" take effect; the
		// previous "new Configuration()" silently discarded them.
		Configuration config = this.getConf();
		if (config == null) {
			config = new Configuration();
		}

		Job job = this.parseInputAndOutput(config, args, this);

		// Map phase.
		job.setMapperClass(LogTotalMapper.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(LogWritable.class);

		// Reduce phase (key carries all stats; value is a NullWritable placeholder).
		job.setReducerClass(LogTotalReduce.class);
		job.setOutputKeyClass(LogTotalWritable.class);
		job.setOutputValueClass(NullWritable.class);

		// =====================shuffle=====================
		// job.setCombinerClass(StandardMapReduce.class);
		// job.setPartitionerClass(cls);
		// job.setGroupingComparatorClass(cls);
		// =====================shuffle=====================

		// Submit and wait; 'true' streams progress to the client.
		boolean flag = job.waitForCompletion(true);
		return flag ? 0 : 1;
	}

	/**
	 * Builds the job and wires up its input/output paths.
	 *
	 * @param conf Hadoop configuration
	 * @param args input path(s) followed by the output path (last element)
	 * @param tool the tool instance; its simple class name becomes the job name
	 * @return the configured job
	 * @throws IOException if the job cannot be created
	 * @throws IllegalArgumentException if fewer than two paths are supplied
	 */
	public Job parseInputAndOutput(Configuration conf, String[] args, Tool tool) throws IOException {
		// Fail fast with a usage message instead of submitting a job that dies
		// later with a missing-input/output error.
		if (args == null || args.length < 2) {
			throw new IllegalArgumentException(
					"Usage: <input path> [<input path>...] <output path>");
		}

		Job job = Job.getInstance(conf, tool.getClass().getSimpleName());
		job.setJarByClass(this.getClass());

		// Last argument is the output directory; everything before it is input.
		FileOutputFormat.setOutputPath(job, new Path(args[args.length - 1]));
		for (int i = 0; i < args.length - 1; i++) {
			FileInputFormat.addInputPath(job, new Path(args[i]));
		}

		return job;
	}

	/**
	 * Entry point: runs the job through ToolRunner and exits with its status.
	 *
	 * @param args input path(s) followed by the output path
	 */
	public static void main(String[] args) throws Exception {
		// ToolRunner parses generic Hadoop options, injects the configuration and
		// invokes run(); the job status becomes the process exit code
		// (0 = success, non-zero = failure).
		Configuration conf = new Configuration();
		System.exit(ToolRunner.run(conf, new LogTotalMapReduce(), args));
	}

}
