package com.znz.analysis.spark.rdd;

import java.io.Serializable;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;

import javax.annotation.Resource;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.broadcast.Broadcast;
import org.springframework.stereotype.Repository;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.znz.analysis.base.BaseRDD;
import com.znz.analysis.bean.Location;
import com.znz.analysis.dao.WebVisitLogDao;
import com.znz.analysis.main.App;
import com.znz.analysis.main.ApplicationContextFactory;
import com.znz.analysis.spark.sparkContext.SparkContext;
import com.znz.analysis.util.GeoLite2Util;
import com.znz.analysis.util.IpAddressUtil;

import scala.Tuple2;

@Repository
// Spark job that aggregates space-delimited web access-log lines per HTTP status
// code and persists the result documents via WebVisitLogDao ("log_status_code").
public class WebVisitLogRDD implements BaseRDD<JavaRDD<String>>, Serializable {

	private static final long serialVersionUID = 5911635287248866293L;

	private static final Logger logger = Logger.getLogger(WebVisitLogRDD.class.getName());
	// Shared ip -> Location cache, populated in handler() and cleared via
	// clearIpMap(); ConcurrentHashMap for safe concurrent access.
	private static Map<String, Location> ipMap = new ConcurrentHashMap<String, Location>();

	// transient: keeps the DAO out of Spark closure serialization.
	@Resource
	private transient WebVisitLogDao webVisitLogDao;

	/**
	 * Entry point: filters the raw log lines, resolves client IP locations and
	 * triggers the per-status-code aggregation.
	 *
	 * @param inputRDD raw log lines
	 * @param hdfsPath HDFS path of the log file being processed
	 */
	@Override
	public void handler(JavaRDD<String> inputRDD, String hdfsPath) {
		logger.setLevel(Level.INFO);
		logger.info(hdfsPath);
		// 1. Drop blank lines and records without enough fields to carry a
		// status code (field index 8).
		JavaRDD<String> input_filterRDD = inputRDD.filter(i -> {
			if (i.trim().equals("")) {
				return false;
			}
			return i.split(" ").length > 8;
		}).coalesce(16, true);
		// 2. Cache: the filtered RDD is scanned several times below.
		JavaRDD<String> input_cacheRDD = input_filterRDD.cache();
		// 3. Distinct status codes plus the synthetic "total" bucket. Copy into
		// a mutable list first: the list returned by collect() may be unmodifiable,
		// in which case add() would throw UnsupportedOperationException.
		List<String> statusCodes = new ArrayList<String>(getStatusCodes(input_cacheRDD));
		statusCodes.add("total");
		// 4. Resolve and cache the geo location of every distinct client IP.
		logger.info("Ip list loading");
		List<String> ipList = input_cacheRDD.map(line -> line.split(" ")[0]).distinct().collect();
		ipList.forEach(ip -> {
			try {
				if (!ipMap.containsKey(ip)) {
					ipMap.put(ip, GeoLite2Util.getLocationByIp(App.IpQueryUrl, ip));
				}
			} catch (Exception e) {
				// Log instead of printStackTrace; a single unresolvable IP must
				// not abort the whole job.
				logger.error("Failed to resolve location for ip " + ip, e);
			}
		});
		logger.info("Ip list loaded");
		// 5. Aggregate per status code and save to the log_status_code collection.
		statusCodeCollectionHandler(input_cacheRDD, statusCodes, hdfsPath);
	}

	/**
	 * Collects the distinct HTTP status codes (field index 8) present in the input.
	 *
	 * @param input the cached log-line RDD
	 * @return a mutable list of distinct status codes
	 * @throws RuntimeException wrapping any parse failure (line kept in the message)
	 */
	private List<String> getStatusCodes(JavaRDD<String> input) {
		JavaRDD<String> types_rdd = input.map(line -> {
			try {
				return line.split(" ")[8];
			} catch (Exception e) {
				// Keep the offending line in the message and preserve the full
				// original exception as the cause (the old code re-wrapped only
				// e.getCause(), dropping the actual failure).
				logger.error(line, e);
				throw new Exception(line, e);
			}
		});
		// collect() may return an unmodifiable list; copy so callers can add to it.
		return new ArrayList<String>(types_rdd.distinct().collect());
	}

	/**
	 * Persists the per-status-code statistics into the log_status_code
	 * collection twice: once with DDOS marker lines excluded, once with them
	 * included.
	 *
	 * @see #statusCodeCollectionProcessor
	 *
	 * @param inputRDD    the cached log-line RDD
	 * @param statusCodes the status codes to aggregate (including "total")
	 * @param hdfsPath    the HDFS path of the processed log file
	 */
	private void statusCodeCollectionHandler(JavaRDD<String> inputRDD, List<String> statusCodes, String hdfsPath) {
		// First pass excludes DDOS marker entries (withDDOS = false), the
		// second pass keeps them (withDDOS = true).
		for (boolean withDDOS : new boolean[] { false, true }) {
			statusCodeCollectionProcessor(inputRDD, statusCodes, hdfsPath, withDDOS);
		}
	}

	/**
	 * Runs one aggregation pass and saves one document per status code to the
	 * log_status_code collection.
	 *
	 * @param inputRDD    the cached log-line RDD
	 * @param statusCodes the status codes to aggregate (including "total")
	 * @param hdfsPath    the HDFS path of the processed log file
	 * @param withDDOS    when false, lines containing the DDOS marker are dropped
	 */
	private void statusCodeCollectionProcessor(JavaRDD<String> inputRDD, List<String> statusCodes, String hdfsPath,
			boolean withDDOS) {
		// Path-derived fields are identical for every status code: compute once,
		// outside the loop.
		String hdfs_relative_path = hdfsPath.substring(hdfsPath.indexOf("/", hdfsPath.indexOf("//") + 2));
		String file_name = hdfsPath.substring(hdfsPath.lastIndexOf("/") + 1);
		String log_timespan = file_name.split("_")[0];
		String log_domain = file_name.split("_")[1].replace(".log.gz", "").replace(".log", "");
		// 1. For every status code, filter the matching lines and aggregate.
		statusCodes.forEach(i -> {
			JavaRDD<String> status_code_rdd = inputRDD;
			// 2. "total" means "all lines"; any other code selects only its own.
			if (!i.equals("total")) {
				status_code_rdd = inputRDD.filter(line -> line.split(" ")[8].equals(i)).coalesce(16, true).cache();
			}
			// 3. Optionally drop DDOS marker entries.
			if (!withDDOS) {
				status_code_rdd = status_code_rdd.filter(line -> !line.contains("DDOS_ACCESS_TIMES_TOTAL"))
						.coalesce(16, true).cache();
			}
			// Fields of the bson document to persist.
			Map<String, Object> map = new HashMap<String, Object>();
			map.put("status_code", i);
			// Line count for this status code; best effort - a failed count is
			// logged and recorded as 0 (the old code swallowed the exception
			// silently and, worse, never restored the interrupt flag).
			long count = 0;
			try {
				count = status_code_rdd.countAsync().get();
			} catch (InterruptedException e) {
				Thread.currentThread().interrupt();
				logger.error("count interrupted for status code " + i, e);
			} catch (ExecutionException e) {
				logger.error("count failed for status code " + i, e);
			}
			map.put("total_count", count);
			map.put("hdfs_path", hdfs_relative_path);
			map.put("log_timespan", log_timespan);
			map.put("log_domain", log_domain);
			map.put("ddos", withDDOS);
			// 4. Traffic volume per 5-minute bucket.
			map.put("data_size", dataSizeArrayAssembly(status_code_rdd));
			// 5. Top requested URLs.
			map.put("request_count", requestCountArrayAssembly(status_code_rdd));
			// 6. Top user agents.
			map.put("agent_count", agentCountArrayAssembly(status_code_rdd));
			// 7. Requests/traffic grouped by client location.
			map.put("ip_address_count", ipAddressCountArrayAssembly(status_code_rdd, false));
			// 8. Timestamp of this aggregation run.
			SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
			map.put("save_time", sdf.format(new Date()));
			// 9. Persist.
			webVisitLogDao.save(map, "log_status_code");
		});
	}

	/**
	 * Builds the data_size field: total transferred bytes (field index 9) per
	 * 5-minute bucket, keyed by the upper bound of the bucket and sorted by
	 * date ascending.
	 *
	 * Keys are salted with a random 0-9 prefix before the first reduce to
	 * spread skewed keys, then the prefix is stripped and a second reduce
	 * produces the global totals.
	 *
	 * @param status_code_rdd the filtered RDD to aggregate
	 * @return a JSONArray of {date, size} objects ordered by date ascending
	 */
	private JSONArray dataSizeArrayAssembly(JavaRDD<String> status_code_rdd) {
		// 1. Map each line to <salted 5-minute bucket, bytes>.
		JavaPairRDD<String, Long> random_prefix_rdd = status_code_rdd.mapPartitionsToPair(lines -> {
			List<Tuple2<String, Long>> list = new ArrayList<Tuple2<String, Long>>();
			// Formatters and the salt source are created once per partition
			// instead of once per line.
			SimpleDateFormat sdf = new SimpleDateFormat("dd/MMMM/yyyy:HH:mm:ss", Locale.US);
			SimpleDateFormat sdf_lower = new SimpleDateFormat("yyyy-MM-dd HH:mm:00", Locale.US);
			Random random = new Random();
			while (lines.hasNext()) {
				String line = lines.next();
				int openIndex = line.indexOf("[");
				int endIndex = line.indexOf("]");
				// Both brackets must be present and correctly ordered. (The old
				// check tested indexOf("[") + 1 > -1, which is always true even
				// when "[" is missing.)
				if (openIndex > -1 && endIndex > openIndex) {
					String time_str = line.substring(openIndex + 1, endIndex);
					Date date = sdf.parse(time_str);
					// Floor the minutes to a 5-minute boundary, then advance one
					// bucket so the key is the bucket's upper bound. Calendar
					// replaces the deprecated Date.getMinutes/setMinutes.
					Calendar calendar = Calendar.getInstance();
					calendar.setTime(date);
					calendar.set(Calendar.MINUTE, (calendar.get(Calendar.MINUTE) / 5) * 5);
					calendar.set(Calendar.SECOND, 0);
					calendar.set(Calendar.MILLISECOND, 0);
					calendar.add(Calendar.MINUTE, 5);
					String high_time_str = sdf_lower.format(calendar.getTime());
					Long data_size = Long.valueOf(line.split(" ")[9]);
					// Salt the key with a random 0-9 prefix.
					int prefix = random.nextInt(10);
					list.add(new Tuple2<String, Long>(prefix + "`#~#~#~#`" + high_time_str, data_size));
				}
			}
			return list;
		});
		// 2. Local aggregation on the salted keys.
		JavaPairRDD<String, Long> local_aggr_rdd = random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 3. Strip the random prefix.
		JavaPairRDD<String, Long> removed_random_prefix_rdd = local_aggr_rdd
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._1.split("`#~#~#~#`")[1], tuple._2));
		// 4. Global aggregation on the real keys. (The old extra reduceByKey on
		// the already-reduced RDD was a redundant shuffle and is removed.)
		JavaPairRDD<String, Long> datasize_total_pair_rdd = removed_random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 5. Sort by date ascending and collect.
		List<Tuple2<String, Long>> output = datasize_total_pair_rdd.sortByKey().collect();
		// 6. Assemble the JSON array.
		JSONArray data_size_array = new JSONArray();
		for (Tuple2<String, Long> tuple : output) {
			if (tuple != null && tuple._1() != null) {
				JSONObject jsonObject = new JSONObject();
				jsonObject.put("date", tuple._1());
				jsonObject.put("size", tuple._2());
				data_size_array.add(jsonObject);
			}
		}
		return data_size_array;
	}

	/**
	 * Builds the agent_count field: the ten most frequent user-agent strings.
	 *
	 * Uses the salt/reduce/unsalt/reduce pattern to mitigate key skew, then
	 * sorts by count descending via a key/value swap.
	 *
	 * @param status_code_rdd the filtered RDD to aggregate
	 * @return a JSONArray of the top-10 {agent, count} objects
	 */
	private JSONArray agentCountArrayAssembly(JavaRDD<String> status_code_rdd) {
		// 1. Drop monitoring traffic (token 11 containing "Zabbix").
		JavaRDD<String> status_code_rdd_filter = status_code_rdd
				.filter(line -> !line.split(" ")[11].contains("Zabbix")).coalesce(16, true).cache();
		// 2. Map each line to <salted agent string, 1>.
		JavaPairRDD<String, Long> random_prefix_rdd = status_code_rdd_filter.mapPartitionsToPair(lines -> {
			List<Tuple2<String, Long>> list = new ArrayList<Tuple2<String, Long>>();
			// One Random per partition instead of one per line.
			Random random = new Random();
			while (lines.hasNext()) {
				String line = lines.next();
				// The agent field starts at token 11; take the first quoted
				// value from that position onwards.
				int beginIndex = line.indexOf(line.split(" ")[11]);
				String agent_str = line.substring(beginIndex).split("\"")[1];
				int prefix = random.nextInt(10);
				list.add(new Tuple2<String, Long>(prefix + "`#~#~#~#`" + agent_str, 1l));
			}
			return list;
		});
		// 3. Local aggregation on the salted keys.
		JavaPairRDD<String, Long> local_aggr_rdd = random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 4. Strip the random prefix.
		JavaPairRDD<String, Long> removed_random_prefix_rdd = local_aggr_rdd
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._1.split("`#~#~#~#`")[1], tuple._2));
		// 5. Global aggregation on the real keys. (The old extra reduceByKey on
		// the already-reduced RDD was a redundant shuffle and is removed.)
		JavaPairRDD<String, Long> agent_count_total_rdd = removed_random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 6. Sort by count descending: swap to <count, agent>, sort, swap back.
		JavaPairRDD<String, Long> keyValueConvertPairRDD = agent_count_total_rdd
				.mapToPair(tuple -> new Tuple2<Long, String>(tuple._2, tuple._1))
				.sortByKey(false)
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._2, tuple._1));
		// 7. Assemble the top-10 JSON array.
		JSONArray agent_count_array_top10 = new JSONArray();
		for (Tuple2<String, Long> tuple : keyValueConvertPairRDD.take(10)) {
			if (tuple != null && tuple._1() != null) {
				JSONObject jsonObject = new JSONObject();
				jsonObject.put("agent", tuple._1());
				jsonObject.put("count", tuple._2());
				agent_count_array_top10.add(jsonObject);
			}
		}
		return agent_count_array_top10;
	}

	/**
	 * Builds the ip_address_count field: request count and transferred bytes
	 * grouped by the client's geographic location, sorted by request count
	 * descending.
	 *
	 * @param status_code_rdd the filtered RDD to aggregate
	 * @param isIgnoreCity    when true, aggregate without the city level and
	 *                        return only the top-10 entries
	 * @return a JSONArray of {location, count, size} objects
	 */
	private JSONArray ipAddressCountArrayAssembly(JavaRDD<String> status_code_rdd, boolean isIgnoreCity) {
		// 1. Drop monitoring traffic. The old code discarded the result of
		// coalesce().cache() on a separate line; keep the returned RDD so both
		// actually take effect.
		JavaRDD<String> status_code_rdd_filter = status_code_rdd
				.filter(line -> !line.split(" ")[11].contains("Zabbix")).coalesce(16, true).cache();

		// Ship the ip -> location cache to the executors once.
		Broadcast<Map<String, Location>> broadcast = ((SparkContext) ApplicationContextFactory.getInstance()
				.getBean(SparkContext.class)).getContext().broadcast(ipMap);
		// 2. Map each line to <salted location, (1, bytes)>.
		JavaPairRDD<String, Tuple2<Long, Long>> random_prefix_rdd = status_code_rdd_filter
				.mapPartitionsToPair(lines -> {
					List<Tuple2<String, Tuple2<Long, Long>>> list = new ArrayList<Tuple2<String, Tuple2<Long, Long>>>();
					// One Random per partition instead of one per line.
					Random random = new Random();
					while (lines.hasNext()) {
						String line = lines.next();
						String ip = line.split(" ")[0];
						Long data_size = Long.valueOf(line.split(" ")[9]);
						// Resolve the cached location; fall back to an empty
						// Location for unresolved IPs (the old code NPE'd here).
						Location loc = broadcast.value().get(ip);
						String location = (loc != null ? loc : new Location()).toString(isIgnoreCity);
						int prefix = random.nextInt(10);
						list.add(new Tuple2<String, Tuple2<Long, Long>>(prefix + "`#~#~#~#`" + location,
								new Tuple2<Long, Long>(1l, data_size)));
					}
					return list;
				});
		// 3. Local aggregation on the salted keys: sum counts and sizes.
		JavaPairRDD<String, Tuple2<Long, Long>> local_aggr_rdd = random_prefix_rdd
				.reduceByKey((i1, i2) -> new Tuple2<Long, Long>(i1._1 + i2._1, i1._2 + i2._2));
		// 4. Strip the random prefix.
		JavaPairRDD<String, Tuple2<Long, Long>> removed_random_prefix_rdd = local_aggr_rdd
				.mapToPair(tuple -> new Tuple2<String, Tuple2<Long, Long>>(tuple._1.split("`#~#~#~#`")[1], tuple._2));
		// 5. Global aggregation on the real keys. (The old extra reduceByKey on
		// the already-reduced RDD was a redundant shuffle and is removed.)
		JavaPairRDD<String, Tuple2<Long, Long>> ip_address_count_total_rdd = removed_random_prefix_rdd
				.reduceByKey((i1, i2) -> new Tuple2<Long, Long>(i1._1 + i2._1, i1._2 + i2._2));
		// 6. Sort by request count descending: swap to <count, (location, size)>,
		// sort, swap back.
		JavaPairRDD<String, Tuple2<Long, Long>> keyValueConvertPairRDD = ip_address_count_total_rdd
				.mapToPair(tuple -> new Tuple2<Long, Tuple2<String, Long>>(tuple._2._1,
						new Tuple2<String, Long>(tuple._1, tuple._2._2)))
				.sortByKey(false)
				.mapToPair(tuple -> new Tuple2<String, Tuple2<Long, Long>>(tuple._2._1,
						new Tuple2<Long, Long>(tuple._1, tuple._2._2)));
		// 7. Top 10 when the city level is ignored, full listing otherwise.
		List<Tuple2<String, Tuple2<Long, Long>>> output;
		if (isIgnoreCity) {
			output = keyValueConvertPairRDD.take(10);
		} else {
			output = keyValueConvertPairRDD.collect();
		}
		// 8. Assemble the JSON array.
		JSONArray ip_address_count_array = new JSONArray();
		for (Tuple2<String, Tuple2<Long, Long>> tuple : output) {
			JSONObject jsonObject = new JSONObject();
			// Convert the "country--province--city" string into a json object.
			JSONObject location_obj = IpAddressUtil.convertString2Json(tuple._1(), isIgnoreCity);
			jsonObject.put("location", location_obj);
			jsonObject.put("count", tuple._2()._1);
			jsonObject.put("size", tuple._2()._2);
			ip_address_count_array.add(jsonObject);
		}
		return ip_address_count_array;
	}

	/**
	 * Builds the request_count field: the ten most requested URLs (field index
	 * 6), each enriched with per-location, per-ip and per-referer breakdowns
	 * computed over the lines matching that URL.
	 *
	 * @param status_code_rdd the filtered RDD to aggregate
	 * @return a JSONArray of the top-10 request objects
	 */
	private JSONArray requestCountArrayAssembly(JavaRDD<String> status_code_rdd) {
		// 1. Drop monitoring traffic (token 11 containing "Zabbix").
		JavaRDD<String> status_code_rdd_filter = status_code_rdd
				.filter(line -> !line.split(" ")[11].contains("Zabbix")).coalesce(16, true).cache();
		// 2. Map each line to <salted request url, 1>; quotes are stripped from
		// the url so it matches the value stored in the result document.
		JavaPairRDD<String, Long> random_prefix_rdd = status_code_rdd_filter.mapPartitionsToPair(lines -> {
			List<Tuple2<String, Long>> list = new ArrayList<Tuple2<String, Long>>();
			// One Random per partition instead of one per line.
			Random random = new Random();
			while (lines.hasNext()) {
				String line = lines.next();
				int prefix = random.nextInt(10);
				list.add(new Tuple2<String, Long>(prefix + "`#~#~#~#`" + line.split(" ")[6].replaceAll("\"", ""), 1l));
			}
			return list;
		});
		// 3. Local aggregation on the salted keys.
		JavaPairRDD<String, Long> local_aggr_rdd = random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 4. Strip the random prefix.
		JavaPairRDD<String, Long> removed_random_prefix_rdd = local_aggr_rdd
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._1.split("`#~#~#~#`")[1], tuple._2));
		// 5. Global aggregation on the real keys. (The old extra reduceByKey on
		// the already-reduced RDD was a redundant shuffle and is removed.)
		JavaPairRDD<String, Long> request_count_total_rdd = removed_random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 6. Sort by count descending: swap to <count, url>, sort, swap back.
		JavaPairRDD<String, Long> keyValueConvertPairRDD = request_count_total_rdd
				.mapToPair(tuple -> new Tuple2<Long, String>(tuple._2, tuple._1))
				.sortByKey(false)
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._2, tuple._1));
		// 7. Assemble the top-10 JSON array.
		JSONArray request_url_array_top10 = new JSONArray();
		for (Tuple2<String, Long> tuple : keyValueConvertPairRDD.take(10)) {
			if (tuple != null && tuple._1() != null) {
				JSONObject jsonObject = new JSONObject();
				jsonObject.put("request", tuple._1());
				jsonObject.put("count", tuple._2());
				request_url_array_top10.add(jsonObject);
			}
		}
		// 8. For each top url, re-filter the RDD and attach ip / location /
		// referer breakdowns. The comparison strips quotes exactly like step 2
		// did (the old code compared the raw token and could miss quoted urls).
		for (int i = 0; i < request_url_array_top10.size(); i++) {
			JSONObject jsonObject = (JSONObject) request_url_array_top10.get(i);
			String request = jsonObject.getString("request");
			JavaRDD<String> status_code_rdd_by_request = status_code_rdd_filter
					.filter(line -> line.split(" ")[6].replaceAll("\"", "").equals(request))
					.coalesce(16, true).cache();
			jsonObject.put("ip_address_count", ipAddressCountArrayAssembly(status_code_rdd_by_request, true));
			jsonObject.put("ip_count", ipCountArrayAssembly(status_code_rdd_by_request));
			jsonObject.put("referer_count", refererCountArrayAssembly(status_code_rdd_by_request));
		}
		return request_url_array_top10;
	}

	/**
	 * Builds the ip_count field: the ten most frequent client IPs with their
	 * request counts and resolved locations.
	 *
	 * @param status_code_rdd the filtered RDD to aggregate
	 * @return a JSONArray of the top-10 {ip, count, location} objects
	 */
	private JSONArray ipCountArrayAssembly(JavaRDD<String> status_code_rdd) {
		// 1. Drop monitoring traffic (token 11 containing "Zabbix").
		JavaRDD<String> status_code_rdd_filter = status_code_rdd
				.filter(line -> !line.split(" ")[11].contains("Zabbix")).coalesce(16, true).cache();
		// 2. Map each line to <salted client ip, 1>.
		JavaPairRDD<String, Long> random_prefix_rdd = status_code_rdd_filter.mapPartitionsToPair(lines -> {
			List<Tuple2<String, Long>> list = new ArrayList<Tuple2<String, Long>>();
			// One Random per partition instead of one per line.
			Random random = new Random();
			while (lines.hasNext()) {
				String ip = lines.next().split(" ")[0];
				int prefix = random.nextInt(10);
				list.add(new Tuple2<String, Long>(prefix + "`#~#~#~#`" + ip, 1l));
			}
			return list;
		});
		// 3. Local aggregation on the salted keys.
		JavaPairRDD<String, Long> local_aggr_rdd = random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 4. Strip the random prefix.
		JavaPairRDD<String, Long> removed_random_prefix_rdd = local_aggr_rdd
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._1.split("`#~#~#~#`")[1], tuple._2));
		// 5. Global aggregation on the real keys. (The old extra reduceByKey on
		// the already-reduced RDD was a redundant shuffle and is removed.)
		JavaPairRDD<String, Long> ip_count_total_rdd = removed_random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 6. Sort by count descending: swap to <count, ip>, sort, swap back.
		JavaPairRDD<String, Long> keyValueConvertPairRDD = ip_count_total_rdd
				.mapToPair(tuple -> new Tuple2<Long, String>(tuple._2, tuple._1))
				.sortByKey(false)
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._2, tuple._1));
		// 7. Assemble the top-10 JSON array.
		JSONArray ip_count_array_top10 = new JSONArray();
		for (Tuple2<String, Long> tuple : keyValueConvertPairRDD.take(10)) {
			if (tuple == null || tuple._1() == null) {
				continue;
			}
			String ip = tuple._1();
			JSONObject jsonObject = new JSONObject();
			jsonObject.put("ip", ip);
			jsonObject.put("count", tuple._2());
			// Attach the cached geo location; unknown IPs get an empty Location.
			// Both branches now store a json object - the old fallback stored a
			// raw string, inconsistent with the resolved branch.
			try {
				Location location = ipMap.containsKey(ip) ? ipMap.get(ip) : new Location();
				jsonObject.put("location", IpAddressUtil.convertString2Json(location.toString(false), false));
			} catch (Exception e) {
				logger.error("Failed to attach location for ip " + ip, e);
			}
			ip_count_array_top10.add(jsonObject);
		}
		return ip_count_array_top10;
	}

	/**
	 * Builds the referer_count field: the ten most frequent referer values.
	 *
	 * @param status_code_rdd the filtered RDD to aggregate
	 * @return a JSONArray of the top-10 {referer, count} objects
	 */
	private JSONArray refererCountArrayAssembly(JavaRDD<String> status_code_rdd) {
		// 1. Drop monitoring traffic (token 11 containing "Zabbix").
		JavaRDD<String> status_code_rdd_filter = status_code_rdd
				.filter(line -> !line.split(" ")[11].contains("Zabbix")).coalesce(16, true).cache();
		// 2. Map each line to <salted referer, 1>.
		JavaPairRDD<String, Long> random_prefix_rdd = status_code_rdd_filter.mapPartitionsToPair(lines -> {
			List<Tuple2<String, Long>> list = new ArrayList<Tuple2<String, Long>>();
			// One Random per partition instead of one per line.
			Random random = new Random();
			while (lines.hasNext()) {
				String line = lines.next();
				// The referer field starts at token 10; take the first quoted
				// value from that position onwards.
				int beginIndex = line.indexOf(line.split(" ")[10]);
				String referer_str = line.substring(beginIndex).split("\"")[1];
				int prefix = random.nextInt(10);
				list.add(new Tuple2<String, Long>(prefix + "`#~#~#~#`" + referer_str, 1l));
			}
			return list;
		});
		// 3. Local aggregation on the salted keys.
		JavaPairRDD<String, Long> local_aggr_rdd = random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 4. Strip the random prefix.
		JavaPairRDD<String, Long> removed_random_prefix_rdd = local_aggr_rdd
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._1.split("`#~#~#~#`")[1], tuple._2));
		// 5. Global aggregation on the real keys. (The old extra reduceByKey on
		// the already-reduced RDD was a redundant shuffle and is removed.)
		JavaPairRDD<String, Long> referer_count_total_rdd = removed_random_prefix_rdd.reduceByKey((i1, i2) -> i1 + i2);
		// 6. Sort by count descending: swap to <count, referer>, sort, swap back.
		JavaPairRDD<String, Long> keyValueConvertPairRDD = referer_count_total_rdd
				.mapToPair(tuple -> new Tuple2<Long, String>(tuple._2, tuple._1))
				.sortByKey(false)
				.mapToPair(tuple -> new Tuple2<String, Long>(tuple._2, tuple._1));
		// 7. Assemble the top-10 JSON array.
		JSONArray referer_count_array_top10 = new JSONArray();
		for (Tuple2<String, Long> tuple : keyValueConvertPairRDD.take(10)) {
			if (tuple != null && tuple._1() != null) {
				JSONObject jsonObject = new JSONObject();
				jsonObject.put("referer", tuple._1());
				jsonObject.put("count", tuple._2());
				referer_count_array_top10.add(jsonObject);
			}
		}
		return referer_count_array_top10;
	}

	/**
	 * Clears the shared ip -> location cache so the next run resolves IPs afresh.
	 */
	public void clearIpMap() {
		ipMap.clear();
	}
}
