package com.lvmama.java.rhino.etl.process;

import java.util.HashMap;
import java.util.Map;

import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;

import com.lvmama.java.rhino.etl.core.AbstractKafkaSparkStreamingTemplate;
import com.lvmama.java.rhino.spark.utils.Constants;
import com.lvmama.java.rhino.spark.utils.LogParseUtils;
import com.lvmama.java.rhino.spark.utils.LogProcessorUtil;

import scala.Tuple2;

/**
 * Spark Streaming job that merges Flume-emitted log lines by thread name and
 * republishes the merged result to a Kafka topic.
 *
 * <p>Pipeline: filter lines lacking a Flume thread id, key each line by its
 * parsed thread name, group per batch, concatenate each group into a single
 * string, and send {@code threadName + THREAD_LOG_SPLIT + mergedLog} to the
 * topic configured under {@code client.service.kafka.log.merge.topic}.
 */
public class SparkStreamingMergeLogProcesser extends AbstractKafkaSparkStreamingTemplate {

	private static final long serialVersionUID = -1422826550552162282L;

	private static final Logger LOGGER = Logger.getLogger(SparkStreamingMergeLogProcesser.class);

	/** Streaming batch interval in milliseconds. */
	private static final long BATCH_INTERVAL_MS = 2000L;

	/**
	 * Processes one DStream of raw log lines end to end.
	 *
	 * <p>NOTE: the method name {@code excute} (sic) is fixed by the superclass
	 * contract and cannot be corrected here without breaking the override.
	 *
	 * @param lines raw log lines consumed from Kafka
	 */
	@Override
	public void excute(JavaDStream<String> lines) {
		// Drop lines that carry no Flume thread id — they cannot be merged.
		lines = lines.filter(new Function<String, Boolean>() {
			private static final long serialVersionUID = 2282788337086017539L;
			@Override
			public Boolean call(String v1) throws Exception {
				return LogProcessorUtil.isNeed(v1);
			}
		});
		// Key each line by its thread name: (threadName, rawLine).
		JavaPairDStream<String, String> threadName_lines = lines.mapToPair(new PairFunction<String, String, String>() {
			private static final long serialVersionUID = -2515183170901051388L;
			@Override
			public Tuple2<String, String> call(String log) throws Exception {
				String threadName = "";
				try {
					threadName = LogParseUtils.parseLog(log).getThreadName();
				} catch (Exception e) {
					// Unparseable lines are kept under the empty key rather than
					// dropped; log the full cause instead of printing to stdout.
					LOGGER.warn("Failed to parse thread name. Log: " + log, e);
				}
				return new Tuple2<String, String>(threadName, log);
			}
		});
		// Group all lines of the same thread within this batch (shuffles by key).
		JavaPairDStream<String, Iterable<String>> threadName_linesList = threadName_lines.groupByKey();
		// Concatenate each thread's lines into one merged log string.
		JavaPairDStream<String, String> threadName_linesString = threadName_linesList.mapValues(new Function<Iterable<String>, String>() {
			private static final long serialVersionUID = 6087123398991915565L;
			@Override
			public String call(Iterable<String> v1) throws Exception {
				return LogProcessorUtil.concatLog(v1);
			}
		});
		// Publish each merged record back to the configured Kafka merge topic.
		threadName_linesString.foreachRDD(new VoidFunction<JavaPairRDD<String, String>>(){
			private static final long serialVersionUID = -3553106151559053607L;
			@Override
			public void call(JavaPairRDD<String, String> keysValues) throws Exception {
				keysValues.foreach(new VoidFunction<Tuple2<String, String>>(){
					private static final long serialVersionUID = 1397413354356097992L;
					@Override
					public void call(Tuple2<String, String> keyValue) throws Exception {
						String log = keyValue._1() + THREAD_LOG_SPLIT + keyValue._2();
						LOGGER.info("ThreadName: " + keyValue._1());
						kafkaProducer.send(new String[]{Constants.getInstance().getValue("client.service.kafka.log.merge.topic")}, log);
					}
				});
			}}
		);
	}

	@Override
	public void saveEnvironment() {
		// Intentionally empty: this job keeps no environment state to persist.
	}

	/**
	 * Builds the Kafka consumption map from configuration.
	 *
	 * @return map of topic name to consumer thread count; topics come from the
	 *         comma-separated {@code client.service.kafka.log.topic} property,
	 *         thread count from {@code client.service.spark.thread.nums}
	 */
	public Map<String, Integer> getKafkaTopic() {
		Map<String, Integer> topicMap = new HashMap<String, Integer>();
		String topicsStr = Constants.getInstance().getValue("client.service.kafka.log.topic");
		// Comma-separated topic list from config.
		String[] topics = topicsStr.split(",");
		// Consumer threads per topic.
		int numThreads = Integer.parseInt(Constants.getInstance().getValue("client.service.spark.thread.nums"));
		for (String topic : topics) {
			// Tolerate whitespace around commas and skip empty entries so a
			// config value like "a, b" does not register the topic " b".
			String name = topic.trim();
			if (!name.isEmpty()) {
				topicMap.put(name, numThreads);
			}
		}
		return topicMap;
	}

	/**
	 * @return the streaming batch interval in milliseconds
	 */
	@Override
	public long getIntervalTime() {
		return BATCH_INTERVAL_MS;
	}
}
