package com.ustcinfo.study.worker1.r4;
/**
 * Processes document/log data and writes the aggregated results to HDFS.
 *
 * @author 徐宗宝、汪强强、王志刚、师红红
 */
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

public class BigDataHandler {

	/** Regex for a single dotted-quad IPv4 address (each octet 0-255). */
	private static final String REGX_IP =
			"((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]\\d|\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]\\d|\\d)";

	// Compiled once: ipIsValid() runs for every log line against every known
	// range, so per-call String.matches() recompilation was a hot-path cost.
	private static final Pattern IP_PATTERN = Pattern.compile(REGX_IP);
	private static final Pattern IP_RANGE_PATTERN = Pattern.compile(REGX_IP + "\\-" + REGX_IP);

	/**
	 * Entry point. Joins raw access-log lines with an IP-range-to-region
	 * table, aggregates hit counts per (location, host), and writes the
	 * top-10 hosts of every region to HDFS.
	 *
	 * @param args args[0] = log input path
	 *             (pipe-separated: ip|host|time|route_chain|status);
	 *             args[1] = region table path
	 *             (tab-separated: startIp endIp regionName);
	 *             args[2] = output path prefix (one directory per region)
	 */
	public static void main(String[] args) {
		if (args.length < 3) {
			System.err.println("Usage: BigDataHandler <logPath> <regionPath> <outputPrefix>");
			System.exit(1);
		}
		final String logPath = args[0];
		final String regionPath = args[1];
		final String outputPrefix = args[2];

		SparkConf sparkConf = new SparkConf().setAppName("bigdata-handler");
		JavaSparkContext jsc = new JavaSparkContext(sparkConf);
		try {
			SQLContext sqlContext = new SQLContext(jsc);

			// Raw access-log lines.
			JavaRDD<String> rdd = jsc.textFile(logPath);

			// "startIp-endIp" -> region name; final so the map closure can capture it.
			final Map<String, String> cityMap = loadCityMap(jsc, regionPath);
			// All known IP ranges, copied so the captured set serializes on its own.
			final Set<String> citySections = new HashSet<String>(cityMap.keySet());
			// Distinct region names (values() may contain duplicates).
			Set<String> cityNames = new HashSet<String>(cityMap.values());

			JavaRDD<Row> rowRDD = rdd.filter(new Function<String, Boolean>() {
				private static final long serialVersionUID = 1L;

				// Drop malformed lines up front: exactly 5 pipe-separated fields expected.
				@Override
				public Boolean call(String line) throws Exception {
					return line.trim().split("\\|").length == 5;
				}
			}).map(new Function<String, Row>() {
				private static final long serialVersionUID = 1L;

				// Resolve the source IP to a region by scanning the known ranges and
				// emit a Row of (ip, host, time, route_chain, status, location).
				// Returns null when no range matches; nulls are filtered out below.
				@Override
				public Row call(String line) throws Exception {
					String[] fields = line.split("\\|");
					for (String section : citySections) {
						if (ipIsValid(section, fields[0])) {
							return RowFactory.create(fields[0], fields[1], fields[2],
									fields[3], fields[4], cityMap.get(section));
						}
					}
					return null;
				}
			}).filter(new Function<Row, Boolean>() {
				private static final long serialVersionUID = 1L;

				// Discard lines whose IP matched no known range.
				@Override
				public Boolean call(Row row) throws Exception {
					return row != null;
				}
			});

			// Base table A: ip host time route_chain status location.
			DataFrame baseTable = sqlContext.createDataFrame(rowRDD, createSchema());
			baseTable.registerTempTable("A");
			DataFrame grouped = sqlContext
					.sql("select location,host,count(*) as size from A group by location,host");
			grouped.registerTempTable("B");

			// For each region keep the ten busiest hosts and write them to HDFS,
			// one output directory per region: outputPrefix + running index.
			int outputIndex = 0;
			for (String cityName : cityNames) {
				// Double single-quotes so a quote inside a region name cannot break the SQL.
				String escaped = cityName.replace("'", "''");
				DataFrame top10 = sqlContext.sql(
						"select * from B where location = '" + escaped + "' order by size desc limit 10");
				top10.toJavaRDD().saveAsTextFile(outputPrefix + outputIndex);
				outputIndex++;
			}
		} finally {
			jsc.stop(); // release the SparkContext even if a stage fails
		}
	}

	/**
	 * Reads the region table (tab-separated: startIp endIp regionName) and
	 * returns a map keyed "startIp-endIp" so ipIsValid() can test membership.
	 * Blank lines and rows with fewer than 3 fields are skipped instead of
	 * crashing the driver.
	 */
	private static Map<String, String> loadCityMap(JavaSparkContext jsc, String path) {
		List<String> lines = jsc.textFile(path).filter(new Function<String, Boolean>() {
			private static final long serialVersionUID = 1L;

			@Override
			public Boolean call(String line) throws Exception {
				return line.trim().length() > 0;
			}
		}).collect();

		Map<String, String> cityMap = new HashMap<String, String>();
		for (String line : lines) {
			String[] parts = line.trim().split("\t");
			if (parts.length >= 3) { // ignore malformed rows
				cityMap.put(parts[0] + "-" + parts[1], parts[2]);
			}
		}
		return cityMap;
	}

	/** Builds the schema of base table A: six nullable string columns. */
	private static StructType createSchema() {
		List<StructField> fields = new ArrayList<StructField>();
		fields.add(DataTypes.createStructField("ip", DataTypes.StringType, true));
		fields.add(DataTypes.createStructField("host", DataTypes.StringType, true));
		fields.add(DataTypes.createStructField("time", DataTypes.StringType, true));
		fields.add(DataTypes.createStructField("route_chain", DataTypes.StringType, true));
		fields.add(DataTypes.createStructField("status", DataTypes.StringType, true));
		fields.add(DataTypes.createStructField("location", DataTypes.StringType, true));
		return DataTypes.createStructType(fields);
	}

	/**
	 * Returns true when {@code ip} lies inside the inclusive IPv4 range
	 * {@code ipSection}, given as "startIp-endIp". A reversed range
	 * ("end-start") is normalized; strings that are not well-formed IPv4
	 * addresses yield false rather than throwing.
	 *
	 * @param ipSection IPv4 range, e.g. "1.1.1.1-1.1.1.255"
	 * @param ip        IPv4 address to test
	 * @throws NullPointerException if either argument is null
	 */
	public static boolean ipIsValid(String ipSection, String ip) {
		if (ipSection == null)
			throw new NullPointerException("IP段不能为空！");
		if (ip == null)
			throw new NullPointerException("IP不能为空！");
		ipSection = ipSection.trim();
		ip = ip.trim();
		if (!IP_RANGE_PATTERN.matcher(ipSection).matches() || !IP_PATTERN.matcher(ip).matches())
			return false;
		int idx = ipSection.indexOf('-');
		long start = packIp(ipSection.substring(0, idx));
		long end = packIp(ipSection.substring(idx + 1));
		long target = packIp(ip);
		if (start > end) { // tolerate a reversed range
			long t = start;
			start = end;
			end = t;
		}
		return start <= target && target <= end;
	}

	/** Packs a dotted-quad IPv4 string into one long so ranges compare numerically. */
	private static long packIp(String dottedQuad) {
		long value = 0L;
		for (String octet : dottedQuad.split("\\.")) {
			value = value << 8 | Integer.parseInt(octet);
		}
		return value;
	}

}
