package Person;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Reducer.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import dataclear.TProperties;

public class Person {
	/**
	 * Driver: configures and submits the "Dx_Person" MapReduce job.
	 * Joins PV counts against a classify-link cache file and scores users
	 * per classify ID. Exits 0 on success, 1 on job failure or any exception.
	 */
	public static void main(String[] args) throws IOException, InterruptedException{
		try {
			// Create job configuration.
			Configuration conf = new Configuration();
			// Raise map/reduce task memory to 5 GB.
			conf.set("mapreduce.map.memory.mb", "5120");
			conf.set("mapreduce.reduce.memory.mb", "5120");
			// Timeout check was disabled historically because the cluster was
			// unstable; only safe while the job is guaranteed loop-free.
			//conf.set("mapred.task.timeout", "0");
			// For small clusters: datanode replacement policy on write failure.
			//conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER"); 
			//conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true"); 
			// Paths: [0] classify-link cache file, [1] PV-count input glob,
			// [2] output directory.
			// NOTE(review): hard-coded local test paths; switch back to the
			// GenericOptionsParser line below for cluster runs.
			String[] otherArgs = {"F://sparkData//configData//t_dx_basic_classify_link.txt", "F://sparkData//test//03.DxCountPV/part-r-00*", "F://sparkData//test//09.DxPerson"};
			//String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
			// Job.getInstance replaces the deprecated new Job(conf, name) constructor.
			Job job = Job.getInstance(conf, "Dx_Person");
			// Required so Hadoop can locate the jar when run from a packaged jar.
			job.setJarByClass(Person.class);
			job.setMapperClass(PersonMapper.class);
			job.setReducerClass(PersonReducer.class);
			// Map output: classify ID -> "userId<sep>pv".
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(Text.class);
			// Final output: value-only lines (key is NullWritable).
			job.setOutputKeyClass(NullWritable.class);
			job.setOutputValueClass(Text.class);
			// Optional partition/grouping hooks, currently unused.
			//job.setPartitionerClass(KeyPartitioner.class);
			//job.setGroupingComparatorClass(KeyGroupingComparator.class);
			// Input/output paths.
			FileInputFormat.setInputPaths(job, otherArgs[1]);
			// Ship the classify-link file to every mapper via the distributed cache.
			job.addCacheFile(new Path(otherArgs[0]).toUri());
			FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));
			// Submit and exit: 0 = success, 1 = job failure.
			System.exit(job.waitForCompletion(true) ? 0 : 1);
		} catch (Exception e) {
			e.printStackTrace();
			// Fix: previously the exception was only printed and the JVM exited 0,
			// hiding driver failures from callers/schedulers.
			System.exit(1);
		}
	}
	/**
	 * Maps PV-count records (behaviorId, userId, pv) to
	 * (classifyId -> "userId<sep>pv") by joining against the classify-link
	 * side file from the distributed cache.
	 *
	 * Uses in-mapper combining: counts are aggregated in {@link #userType}
	 * during map() and flushed once in {@link #cleanup}.
	 */
	public static class PersonMapper extends Mapper<LongWritable, Text, Text, Text>{
		private Text okey = new Text();
		private Text ovalue = new Text();
		// behavior ID -> classify ID, loaded from the cache file in setup()
		private Map<String, String> typeData = new HashMap<String, String>();
		// "classifyId,userId" -> accumulated PV count for this map task
		private Map<String, Integer> userType = new HashMap<String, Integer>();
		private String typeKey;
		
		// Load the behavior-to-classify mapping before any map() call.
		@Override
		protected void setup(Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException{
			Path file = new Path(context.getCacheFiles()[0].getPath());
			// try-with-resources replaces the manual try/finally close; real I/O
			// errors now propagate instead of being swallowed by a broad catch.
			try (BufferedReader br = new BufferedReader(new FileReader(file.toString()))) {
				String str;
				while ((str = br.readLine()) != null) {
					// columns: [0] classify ID, [1] behavior ID
					String[] splits = str.split(TProperties.getValue("fileoutsplit"));
					// skip malformed lines rather than throwing out of setup
					if (splits.length >= 2) {
						typeData.put(splits[1], splits[0]);
					}
				}
			}
		}
		
		/**
		 * Input line columns: [0] behavior ID, [1] user ID, [2] PV count.
		 * Accumulates PV per (classifyId, userId) into {@link #userType}.
		 */
		@Override
		public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			String[] values = value.toString().split(TProperties.getValue("fileoutsplit"));
			// guard: skip short rows instead of ArrayIndexOutOfBoundsException
			if (values.length < 3) {
				return;
			}
			// single lookup replaces containsKey + get
			String classify = typeData.get(values[0]);
			if (classify != null) {
				// key: "classifyId,userId"
				typeKey = classify + "," + values[1];
				int pv = Integer.parseInt(values[2]);
				Integer prev = userType.get(typeKey);
				// accumulate PV count for this (classify, user) pair
				userType.put(typeKey, prev == null ? pv : prev + pv);
			}
		}
		
		// Runs once after all map() calls: flush the in-mapper aggregates,
		// one record per (classifyId, userId).
		@Override
		protected void cleanup(Context context) throws IOException,InterruptedException {
			for (Map.Entry<String, Integer> entry : userType.entrySet()) {
				// split once; reuse the Text fields instead of allocating per record
				String[] parts = entry.getKey().split(",", -1);
				okey.set(parts[0]);
				ovalue.set(parts[1] + TProperties.getValue("outfilesplit") + entry.getValue());
				// output: classifyId -> userId<sep>pv
				context.write(okey, ovalue);
			}
		}
	}
	
	
	
	/**
	 * Per classify ID: totals PV counts per user, then emits each user's
	 * z-score (shifted by +5, clamped to [0, 10]) with the raw counts.
	 * Output line: userId<sep>classifyId<sep>userPV<sep>totalPV<sep>score
	 */
	public static class PersonReducer extends Reducer<Text, Text, NullWritable, Text> {
		private Text result = new Text();
		
		@Override
		public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException{
			// total PV over all users of this classify ID
			int sumPV = 0;
			// distinct user count
			int sumN = 0;
			// user ID -> summed PV count
			Map<String, Integer> map = new HashMap<String, Integer>();
			
			// First pass: sum PV per user and overall.
			for (Text val : values) {
				// val columns: [0] user ID, [1] PV count
				String[] str = val.toString().split(TProperties.getValue("fileoutsplit"));
				int pv = Integer.parseInt(str[1]);
				sumPV = sumPV + pv;
				Integer prev = map.get(str[0]);
				if (prev != null) {
					map.put(str[0], prev + pv);
				} else {
					map.put(str[0], pv);
					// first time we see this user
					sumN = sumN + 1;
				}
			}
			// Fix: cast before dividing — "sumPV / sumN" was integer division
			// and truncated the mean, skewing every z-score below.
			double avg = (double) sumPV / sumN;
			// sum of squared deviations from the mean
			double math2 = 0;
			for (Integer count : map.values()) {
				math2 = math2 + Math.pow(count - avg, 2);
			}
			// population standard deviation
			double fc = Math.sqrt(math2 / sumN);
			// Second pass: score and emit each user.
			for (Map.Entry<String, Integer> entry : map.entrySet()) {
				// Fix: when every user has the same count fc is 0 and the old
				// code emitted NaN; treat that case as a z-score of 0.
				double score = fc == 0 ? 0 : (entry.getValue() - avg) / fc;
				// shift so scores center on 5, then clamp to [0, 10]
				score = score + 5;
				if (score > 10) {
					score = 10;
				}
				if (score < 0) {
					score = 0;
				}
				// reuse the Text field; write() serializes immediately so this is safe
				result.set(entry.getKey() + TProperties.getValue("outfilesplit") + key.toString() +
						TProperties.getValue("outfilesplit") + entry.getValue() +
						TProperties.getValue("outfilesplit") + sumPV +
						TProperties.getValue("outfilesplit") + score);
				context.write(NullWritable.get(), result);
			}
		}
	}
}
