package com.youxin.logprocess.controller;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFunction;

import com.youxin.busfoundation.bean.FileBeatBean;
import com.youxin.busfoundation.config.KafkaConfig;
import com.youxin.busfoundation.config.MonitorBaseBean;
import com.youxin.busfoundation.config.SparkConfig;
import com.youxin.foundation.utils.JsonUtils;
import com.youxin.foundation.utils.thread.ScheduleThreadPool;
import com.youxin.logprocess.common.MonitorGlobalConfig;

import java.io.Serializable;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.TimeZone;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.*;
import org.apache.spark.streaming.kafka010.*;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.log4j.Logger;

import scala.Function2;
import scala.Tuple2;

/**
 * Monitors log anomalies with Spark Streaming: consumes filebeat records
 * from Kafka and dispatches matching entries to the nginx monitor.
 * 
 * @author yingde.cao
 *
 */
public class SparkController extends BaseMonitorController implements Serializable {

	private static final long serialVersionUID = 4575894675745L;

	private static final Logger logger = Logger.getLogger(SparkController.class);
	private static final SparkController controller = new SparkController();

	/** Initial delay (ms) before the first monitor run. */
	private static final long INITIAL_DELAY_MS = 30000L;
	/** Delay (ms) between the end of one monitor run and the next restart attempt. */
	private static final long RESTART_DELAY_MS = 3000L;

	private JavaSparkContext sc = null;
	private JavaStreamingContext streamingContext = null;

	/**
	 * @return the process-wide singleton instance
	 */
	public static SparkController instance() {
		return controller;
	}

	/**
	 * Schedules the Spark Streaming monitor on the shared schedule pool.
	 * {@link #monitor()} blocks until the streaming context terminates, so the
	 * fixed delay effectively acts as a restart back-off between runs.
	 */
	public void execute() {
		try {
			ScheduleThreadPool.scheduleWithFixedDelay(new Runnable() {
				@Override
				public void run() {
					monitor();
				}
			}, INITIAL_DELAY_MS, RESTART_DELAY_MS);
		} catch (Exception e) {
			logger.error("Execute", e);
		}
	}

	/**
	 * Main monitor loop: builds the Kafka direct stream, processes each record
	 * via {@link #processRecord(ConsumerRecord)}, and blocks until the
	 * streaming context terminates. Contexts are always torn down in
	 * {@code finally} so a rescheduled run starts from a clean state.
	 */
	private void monitor() {
		try {
			Map<String, Object> kafkaParams = buildKafkaParams();
			Collection<String> topics = KafkaConfig.getConfig().getTopicsList();

			SparkConfig sparkConfig = SparkConfig.getConfig();
			SparkConf conf = new SparkConf().setAppName("LogProcess").setMaster(sparkConfig.getMaster());
			sc = new JavaSparkContext(conf);
			streamingContext = new JavaStreamingContext(sc, Durations.milliseconds(sparkConfig.getDuration()));

			JavaInputDStream<ConsumerRecord<String, String>> stream = KafkaUtils.createDirectStream(streamingContext,
					LocationStrategies.PreferConsistent(),
					ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams));

			// Static method reference keeps the closure free of captured state,
			// so Spark only has to serialize a stateless lambda per partition.
			stream.foreachRDD(rdd -> rdd.foreachPartition(
					partition -> partition.forEachRemaining(SparkController::processRecord)));

			streamingContext.start();
			streamingContext.awaitTermination();
			logger.debug("finished");
		} catch (Exception e) {
			logger.error("Spark Streaming stop", e);
		} finally {
			stop();
		}
	}

	/**
	 * Assembles the Kafka consumer configuration for the direct stream.
	 * NOTE(review): auto-commit is enabled, so offsets may be committed before
	 * a batch is fully processed — at-most-once semantics; confirm this is
	 * acceptable for this monitor.
	 *
	 * @return mutable parameter map consumed by {@code ConsumerStrategies.Subscribe}
	 */
	private static Map<String, Object> buildKafkaParams() {
		KafkaConfig kafkaConfig = KafkaConfig.getConfig();
		// 8 entries; capacity 16 avoids a resize at the default load factor.
		Map<String, Object> kafkaParams = new HashMap<>(16);
		kafkaParams.put("bootstrap.servers", kafkaConfig.getServers());
		kafkaParams.put("key.deserializer", StringDeserializer.class);
		kafkaParams.put("value.deserializer", StringDeserializer.class);
		kafkaParams.put("group.id", kafkaConfig.getGroupId());
		kafkaParams.put("max.partition.fetch.bytes", 10485720);
		kafkaParams.put("receive.buffer.bytes", 10485720);
		kafkaParams.put("auto.offset.reset", "earliest");
		kafkaParams.put("enable.auto.commit", true);
		return kafkaParams;
	}

	/**
	 * Parses one Kafka record as a {@link FileBeatBean} and forwards nginx
	 * access-log entries to the nginx monitor. Parse/dispatch failures are
	 * logged and swallowed so a single bad record cannot kill the batch.
	 *
	 * @param record raw Kafka record whose value is a filebeat JSON payload
	 */
	private static void processRecord(ConsumerRecord<String, String> record) {
		try {
			FileBeatBean fileBeatBean = (FileBeatBean) JsonUtils.stringToObject(record.value(),
					FileBeatBean.class);
			if (fileBeatBean.getFields() != null
					&& MonitorGlobalConfig.NGINX_LOG_NAME.equalsIgnoreCase(fileBeatBean.getFields().getAppId())
					&& fileBeatBean.getSource().indexOf("access.log") >= 0) {
				MonitorNginxController.instance().execute(fileBeatBean);
			}
		} catch (Exception e) {
			logger.error("FileBeatBean error,data:" + record.toString(), e);
		}
	}

	/**
	 * Tears down the streaming and Spark contexts. Each context is stopped
	 * independently so a failure in one does not prevent cleanup of the other;
	 * failures are logged (best-effort shutdown, never thrown to the caller).
	 */
	public void stop() {
		try {
			if (streamingContext != null) {
				// stopSparkContext=false: the JavaSparkContext is closed separately below.
				streamingContext.stop(true, false);
				streamingContext.close();
				streamingContext = null;
			}
		} catch (Exception e) {
			logger.warn("Stop streaming context failed", e);
		}
		try {
			if (sc != null) {
				sc.cancelAllJobs();
				sc.close();
				sc = null;
			}
		} catch (Exception e) {
			logger.warn("Stop spark context failed", e);
		}
	}

	/**
	 * No monitor bean is exposed by this controller.
	 *
	 * @return always {@code null}
	 */
	@Override
	public MonitorBaseBean getMonitorBean() {
		return null;
	}

}
