package com.iot.nev.gateway.connector.spark;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
/*
 * Copyright (c) 2017, 1DAOYUN and/or its affiliates. All rights reserved.
 * 1DAOYUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
 */
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.iot.nev.gateway.client.Client;
import com.iot.nev.gateway.connector.Connector;
import com.iot.nev.gateway.utils.SparkUtils;

import kafka.serializer.StringDecoder;
import scala.Tuple2;

/**
 * Spark Streaming client that consumes messages from a Kafka topic via a
 * direct stream, logs each message payload, and exposes lifecycle control
 * through the {@code Client} interface.
 *
 * @author soongxueyong
 * @since V1.0
 */
public class SparkConnector implements Client {

	private static final Logger logger = LoggerFactory.getLogger(SparkConnector.class);

	/** Comma-separated list of Kafka topics to subscribe to. */
	private static final String topic = "mytopic"; // Tina-topic

	/** Plain-text file that {@link #saveData(String)} appends raw messages to. */
	private static final String logPath = System.getProperty("user.dir") + "/logs/sparkLog.txt";

	/** Streaming context; {@code null} until {@link #init()} has run and after {@link #stop()}. */
	private static JavaStreamingContext jssc;

	/** Sequence number prefixed to each line written by saveData(); guarded by its lock. */
	private static int num = 1;

	/**
	 * Builds a Kafka direct stream, logs every message value, starts the
	 * streaming context and blocks until it terminates.
	 * <p>
	 * NOTE(review): broker address ("master.hadoop:6667"), master ("local[2]")
	 * and topic are hard-coded — presumably placeholders for configuration;
	 * confirm before production use.
	 */
	public static void getServer() {
		// Create context with a 1 second batch interval. (The original comment
		// claimed 2 seconds, but Durations.seconds(1) is what is actually passed.)
		jssc = SparkUtils.getJavaStreamingContext("JavaKafkaWordCount", "local[2]", null, Durations.seconds(1));
		Set<String> topicsSet = new HashSet<>(Arrays.asList(topic.split(",")));

		Map<String, String> kafkaParams = new HashMap<>();
		kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "master.hadoop:6667");
		// Fresh, timestamped group id per run, so each launch gets its own offsets.
		kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, getID());

		// Create direct kafka stream with brokers and topics.
		JavaPairInputDStream<String, String> orderMsgStream = KafkaUtils.createDirectStream(jssc, String.class,
				String.class, StringDecoder.class, StringDecoder.class, kafkaParams, topicsSet);

		// Extract the message payload (the value of each Kafka key/value pair),
		// logging it as a side effect. cache() avoids recomputation when the
		// stream is consumed more than once.
		JavaDStream<String> orderDStream = orderMsgStream.map(new Function<Tuple2<String, String>, String>() {
			private static final long serialVersionUID = 1L;

			@Override
			public String call(Tuple2<String, String> t2) throws Exception {
				logger.info(t2._2);
				return t2._2;
			}
		}).cache();

		orderDStream.print();
		jssc.start();
		jssc.awaitTermination(); // blocks the calling thread until stop() is invoked
	}

	/**
	 * Starts the streaming context if it is not already running.
	 * Delegates to {@link #init()}; blocks until the context terminates.
	 */
	@Override
	public void start() {
		if (jssc == null) {
			init();
		}
	}

	/**
	 * Stops the streaming context (if any) and clears the reference so a
	 * subsequent {@link #start()} or {@link #restart()} can create a fresh one.
	 * The original left the stopped context in place, which made restart() a no-op.
	 */
	@Override
	public void stop() {
		if (jssc != null) {
			jssc.stop();
			jssc = null;
		}
	}

	/**
	 * Returns a consumer group id unique per launch: the class name suffixed
	 * with the current wall-clock time in milliseconds.
	 *
	 * @return a freshly generated group id string
	 */
	public static String getID() {
		return SparkConnector.class.getName() + System.currentTimeMillis();
	}

	/** @return always {@code null}; this client has no associated connector. */
	@Override
	public Connector getConnector() {
		return null;
	}

	/** @return this instance. */
	@Override
	public Client getClient() {
		return this;
	}

	/**
	 * Lazily creates and runs the streaming context; blocks until termination.
	 */
	@Override
	public void init() {
		if (jssc == null) {
			getServer();
		}
	}

	/** Stops the current streaming context and starts a new one. */
	@Override
	public void restart() {
		stop();
		start();
	}

	/**
	 * @return whether a streaming context currently exists. (The original
	 *         hard-coded {@code false} even while running.)
	 */
	@Override
	public boolean isActive() {
		return jssc != null;
	}

	/** @return always 0; connection counting is not implemented for Spark. */
	@Override
	public int getConnectedCount() {
		return 0;
	}

	/** @return always 0; service level is not tracked for Spark. */
	@Override
	public float getServiceLevel() {
		return 0;
	}

	/**
	 * Appends one message to {@link #logPath} as
	 * {@code "<seq>\t<message>\t<millis>\r\n"}.
	 * <p>
	 * Synchronized so the {@code num} counter and file appends are safe when
	 * called from multiple threads. The writer is opened and closed per call
	 * via try-with-resources; the original reassigned a static writer without
	 * closing the previous one, leaking a file descriptor per message.
	 *
	 * @param str the raw message text to persist
	 */
	public static synchronized void saveData(String str) {
		try (BufferedWriter writer = new BufferedWriter(new FileWriter(logPath, true))) {
			writer.write(num++ + "\t" + str + "\t" + System.currentTimeMillis() + "\r\n");
		} catch (IOException e) {
			logger.error("spark write log error", e);
		}
	}

	/**
	 * Retained for API compatibility. Now a no-op: {@link #saveData(String)}
	 * closes its writer on every call, so there is no long-lived writer left
	 * to close here.
	 */
	public void close() {
		// intentionally empty — see Javadoc
	}
}