package com.lvmama.java.rhino.etl.process;

import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;

import com.lvmama.java.rhino.etl.core.AbstractKafkaSparkStreamingTemplate;
import com.lvmama.java.rhino.spark.utils.Constants;
import com.lvmama.java.rhino.spark.utils.LogComparator;
import com.lvmama.java.rhino.spark.utils.LogProcessorUtil;

import scala.Tuple2;

/**
 * Re-merges log fragments consumed from the merge topic:<br>
 * 1. Logs whose merge is complete are sent to the parse topic.<br>
 * 2. Logs still incomplete are pushed back onto the merge topic to be merged again.<br>
 * 3. Logs whose merge window has expired are sent to the timeout topic.
 * @author wxliyong
 */
public class SparkStreamingReMergeLogProcesser extends AbstractKafkaSparkStreamingTemplate {

	private static final long serialVersionUID = -3518397560871463631L;

	private static final Logger LOGGER = Logger.getLogger(SparkStreamingReMergeLogProcesser.class);

	/** Streaming batch interval in milliseconds (see {@link #getIntervalTime()}). */
	private static final long BATCH_INTERVAL_MS = 4000;

	/**
	 * Builds the Kafka subscription map for this job.
	 *
	 * @return map of topic name to consumer thread count; topics come from the
	 *         comma-separated {@code client.service.kafka.log.merge.topic} property,
	 *         thread count from {@code client.service.spark.thread.nums}.
	 */
	@Override
	public Map<String, Integer> getKafkaTopic() {
		// Map<topic name, consumer thread count>
		Map<String, Integer> topicMap = new HashMap<String, Integer>();
		String topicsStr = Constants.getInstance().getValue("client.service.kafka.log.merge.topic");
		// One shared thread count for every subscribed topic.
		int numThreads = Integer.parseInt(Constants.getInstance().getValue("client.service.spark.thread.nums"));
		for (String topic : topicsStr.split(",")) {
			// Tolerate stray whitespace / trailing commas in the configured list.
			topic = topic.trim();
			if (!topic.isEmpty()) {
				topicMap.put(topic, numThreads);
			}
		}
		return topicMap;
	}

	/**
	 * Processing pipeline: split each raw line into (threadName, logLine), group
	 * all lines of the same thread, concatenate them, then route the merged log
	 * to the parse / timeout / merge topic depending on its state.
	 *
	 * @param lines raw log lines, each expected as {@code threadName + THREAD_LOG_SPLIT + content}
	 */
	@Override
	public void excute(JavaDStream<String> lines) {
		// Convert each line to a (threadName, logLine) pair. Malformed lines
		// (missing the THREAD_LOG_SPLIT separator) are mapped to null and
		// dropped by the filter below.
		JavaPairDStream<String, String> threadName_lines = lines.mapToPair(new PairFunction<String, String, String>() {
			private static final long serialVersionUID = -2515183170901051388L;
			@Override
			public Tuple2<String, String> call(String log) throws Exception {
				String[] logs = log.split(THREAD_LOG_SPLIT);
				if (logs.length != 2) {
					return null; // removed by the null filter below
				}
				return new Tuple2<String, String>(logs[0], logs[1]);
			}
		}).filter(new Function<Tuple2<String, String>, Boolean>() {
			private static final long serialVersionUID = -2515183170901051389L;
			@Override
			public Boolean call(Tuple2<String, String> pair) throws Exception {
				// BUGFIX: a null element would throw NullPointerException during
				// the groupByKey shuffle, so malformed lines are dropped here.
				return pair != null;
			}
		});
		// Aggregate all lines belonging to the same thread name (shuffles by key).
		JavaPairDStream<String, Iterable<String>> threadName_linesList = threadName_lines.groupByKey();
		// Concatenate every line of a thread into a single log string.
		JavaPairDStream<String, String> threadName_linesString = threadName_linesList.mapValues(new Function<Iterable<String>, String>(){
			private static final long serialVersionUID = 3667785468066296048L;
			@Override
			public String call(Iterable<String> v1) throws Exception {
				List<String> mergeLogList = new ArrayList<String>();
				Iterator<String> it = v1.iterator();
				while (it.hasNext()) {
					String logStr = it.next();
					List<String> temLogList = LogProcessorUtil.splitLog(logStr);
					if (temLogList != null) {
						mergeLogList.addAll(temLogList);
					} else {
						// Unsplittable fragment: log and skip instead of failing the batch.
						LOGGER.warn("Malformed log fragment, skipped: " + logStr);
					}
				}
				return LogProcessorUtil.concatLog(mergeLogList);
			}
		});
		threadName_linesString.foreachRDD(new VoidFunction<JavaPairRDD<String, String>>(){
			private static final long serialVersionUID = 1304087117582961200L;
			@Override
			public void call(JavaPairRDD<String, String> keysValues) throws Exception {
				keysValues.foreach(new VoidFunction<Tuple2<String, String>>(){
					private static final long serialVersionUID = 2410304799959932466L;
					@Override
					public void call(Tuple2<String, String> keyValue) throws Exception {
						List<String> logList = LogProcessorUtil.splitLog(keyValue._2());
						// BUGFIX: splitLog may return null (see the mapValues step above);
						// guard before sorting to avoid a NullPointerException.
						if (logList == null || logList.isEmpty()) {
							LOGGER.warn("Unsplittable merged log, ThreadName: " + keyValue._1());
							return;
						}
						// Single sort is sufficient (the original sorted twice on the complete path).
						Collections.sort(logList, new LogComparator());
						if (LogProcessorUtil.isCompleteLog(logList)) {
							// Fully merged: hand over to the parse topic.
							LOGGER.info("Log is completed, ThreadName: " + keyValue._1());
							kafkaProducer.send(new String[]{Constants.getInstance().getValue("client.service.kafka.log.parse.topic")}, LogProcessorUtil.concatLog(logList));
						} else {
							String firstLog = logList.get(0);
							Date printDate = LogProcessorUtil.getPrintDate(firstLog);
							if (printDate == null) {
								// No timestamp available: cannot evaluate the timeout, drop silently
								// (matches the original behavior).
								return;
							}
							Calendar printCal = Calendar.getInstance();
							printCal.setTime(printDate);
							int outtime = Integer.parseInt(Constants.getInstance().getValue("client.service.log.merge.outtime"));
							printCal.add(Calendar.SECOND, outtime);
							// Merge window expired: route the log to the timeout topic.
							if (printCal.getTime().before(new Date())) {
								LOGGER.info("Request is time out, ThreadName: " + keyValue._1());
								kafkaProducer.send(new String[]{Constants.getInstance().getValue("client.service.kafka.log.outtime.topic")}, keyValue._2());
								return;
							}
							// Still within the window: re-queue on the merge topic for another pass.
							String log = keyValue._1() + THREAD_LOG_SPLIT + keyValue._2();
							LOGGER.info("Log is not completed, ReMerge, ThreadName: " + keyValue._1());
							kafkaProducer.send(new String[]{Constants.getInstance().getValue("client.service.kafka.log.merge.topic")}, log);
						}
					}
				});
			}
		});
	}

	/** No environment state to persist for this processor. */
	@Override
	public void saveEnvironment() {
		// Intentionally empty: this job keeps no checkpointable environment.
	}

	/**
	 * @return streaming batch interval in milliseconds
	 */
	@Override
	public long getIntervalTime() {
		return BATCH_INTERVAL_MS;
	}

}
