package com.rainsoft.center.isec.stream.base;

import com.alibaba.fastjson.JSONObject;
import com.rainsoft.center.isec.common.annotation.HTable;
import com.rainsoft.center.isec.stream.content.entity.AHBaseEntity;
import com.rainsoft.center.isec.common.entity.Constants;
import com.rainsoft.center.isec.common.utils.ISecHBaseUtils;
import com.rainsoft.center.isec.common.utils.ISecSparkUtils;
import com.rainsoft.center.isec.common.utils.JDBCHelper;
import com.rainsoft.center.isec.stream.utils.CustomKafkaDStreamUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;

/**
 * @Name com.rainsoft.center.isec.stream.base.ABaseDataLoadToHBase
 * @Description Base template for loading Kafka streaming data into HBase.
 * @Author Elwyn
 * @Version 2017/11/21
 * @Copyright 上海云辰信息科技有限公司
 **/
/**
 * Base template for a Kafka-backed Spark Streaming job that persists entities
 * of type {@code T} into HBase.
 *
 * <p>The target HBase table, column family and Kafka topics are read from the
 * {@link HTable} annotation declared on the entity class. Subclasses supply
 * the row-key strategy by implementing {@link #generateRowKey(AHBaseEntity)}.
 *
 * @param <T> entity type carrying the {@link HTable} annotation
 */
public abstract class ABaseDataLoadToHBase<T extends AHBaseEntity> implements IBaseDataLoad<T> {
	/** Streaming batch interval passed to the JavaStreamingContext. */
	private final long duration;
	/** Spark application name; derived from the entity class name. */
	protected String appName;
	/** Entity class used for JSON deserialization and annotation lookup. */
	protected Class<T> tClass;
	/** HBase table name, read from the entity's @HTable annotation. */
	private String hTableName;
	/** HBase column family, read from the entity's @HTable annotation. */
	private String family;
	/** Kafka topics to subscribe to, read from the entity's @HTable annotation. */
	private String[] topicName;

	/**
	 * @param duration streaming batch interval
	 * @param tClass   entity class; must be annotated with {@link HTable}
	 * @throws RuntimeException if the annotation is missing or incomplete
	 */
	public ABaseDataLoadToHBase(long duration, Class<T> tClass) {
		this.tClass = tClass;
		this.appName = tClass.getName();
		this.duration = duration;
		// NOTE(review): invoking an overridable method from a constructor is
		// fragile (a subclass override runs before the subclass is fully
		// initialized). Kept for backward compatibility with existing subclasses.
		initParam();
	}

	/**
	 * Builds the HBase row key for one entity. Implemented by subclasses.
	 *
	 * @param t the entity to key
	 * @return the row key string
	 */
	@Override
	public abstract String generateRowKey(T t);

	/** @return a streaming context configured with this job's app name and batch duration */
	@Override
	public JavaStreamingContext getJavaStreamingContext() {
		return ISecSparkUtils.getJavaStreamingContext(getSparkConf(appName), duration);
	}

	/** @return a plain Spark context configured with this job's app name */
	@Override
	public JavaSparkContext getJavaSparkContext() {
		return ISecSparkUtils.getJavaSparkContext(getSparkConf(appName));
	}

	/** @return the SparkConf for the given application name */
	@Override
	public SparkConf getSparkConf(String appName) {
		return ISecSparkUtils.getSparkConf(appName);
	}

	/**
	 * Reads table name, column family and topic names from the entity's
	 * {@link HTable} annotation.
	 *
	 * @throws RuntimeException if the annotation is absent, or if it declares
	 *                          no topics or an empty table name
	 */
	@Override
	public void initParam() {
		HTable hTable = tClass.getDeclaredAnnotation(HTable.class);
		if (hTable == null) {
			throw new RuntimeException("实体必须加com.rainsoft.center.isec.common.annotation.HTable注解");
		}
		this.topicName = hTable.topicName();
		this.hTableName = hTable.tableName();
		this.family = hTable.family();
		if (topicName.length == 0 || "".equals(hTableName)) {
			throw new RuntimeException("实体必须设置topicName或tableName");
		}
	}

	/**
	 * Creates the Kafka input stream for the annotated topics.
	 *
	 * @param javaStreamingContext the streaming context to attach to
	 * @return a direct Kafka stream of raw String records
	 */
	@Override
	public JavaInputDStream<ConsumerRecord<String, String>> getJavaInputDStream(JavaStreamingContext javaStreamingContext) {
		return new CustomKafkaDStreamUtil<String, String>().getKafkaStreaming(javaStreamingContext, Constants.KAFKA_BROKERS, Arrays.asList(topicName));
	}

	/**
	 * Main pipeline: consume Kafka, deserialize each record's JSON value into
	 * {@code T}, persist every micro-batch to HBase, then block until the
	 * streaming context terminates.
	 */
	@Override
	public void process() {
		JavaStreamingContext javaStreamingContext = getJavaStreamingContext();
		JavaInputDStream<ConsumerRecord<String, String>> javaInputDStream = getJavaInputDStream(javaStreamingContext);
		// Deserialize each Kafka record's JSON payload into the entity type.
		// (A per-record System.out.println debug statement was removed here:
		// printing every message floods executor logs in production.)
		JavaDStream<T> dStream = javaInputDStream.map(consumerRecord ->
				JSONObject.parseObject(consumerRecord.value(), tClass));
		saveToHBase(dStream);
		start(javaStreamingContext);
	}

	/**
	 * Persists every micro-batch of the stream to HBase by delegating to the
	 * single-RDD overload (removes the previous copy-pasted duplicate logic).
	 *
	 * @param dStream stream of entities to persist
	 */
	@Override
	public void saveToHBase(JavaDStream<T> dStream) {
		dStream.foreachRDD(this::saveToHBase);
	}

	/**
	 * Persists one RDD of entities to the annotated HBase table.
	 * Empty RDDs are skipped to avoid setting up a Hadoop job for nothing.
	 *
	 * @param javaRDD entities to persist; may be empty
	 */
	@Override
	public void saveToHBase(JavaRDD<T> javaRDD) {
		// Nothing to write for an empty batch.
		if (javaRDD.isEmpty()) {
			return;
		}
		// Build the HBase output configuration targeting the annotated table.
		Job job = ISecSparkUtils.setOutputTable(Constants.ZK_HOSTS, hTableName);
		if (job != null) {
			// Convert each entity into (ImmutableBytesWritable, Put) and write
			// via the new-API Hadoop output format.
			javaRDD.mapToPair(t -> {
				Put put = ISecHBaseUtils.generatePut(t, generateRowKey(t), family);
				return new Tuple2<>(new ImmutableBytesWritable(), put);
			}).saveAsNewAPIHadoopDataset(job.getConfiguration());
		}
	}

	/**
	 * Collects each Row's columns into an Object[] batch argument list and
	 * executes the given SQL as a JDBC batch.
	 *
	 * <p>NOTE(review): {@code collect()} pulls the whole dataset to the driver;
	 * callers must ensure the dataset fits in driver memory.
	 *
	 * @param rowJavaRDD rows whose columns map positionally to the SQL placeholders
	 * @param sql        parameterized insert/update statement
	 * @return per-statement update counts from the JDBC batch
	 */
	public int[] saveToOracle(Dataset<Row> rowJavaRDD, String sql) {
		List<Object[]> jdbcArgs = rowJavaRDD.javaRDD().map((Function<Row, Object[]>) v1 -> {
			int size = v1.size();
			Object[] objects = new Object[size];
			for (int i = 0; i < size; i++) {
				objects[i] = v1.get(i);
			}
			return objects;
		}).collect();
		return JDBCHelper.getInstance().executeBatch(sql, jdbcArgs);
	}

	/**
	 * Starts the streaming context and blocks until it terminates.
	 * On interruption the interrupt flag is restored (previously it was
	 * swallowed with printStackTrace), and the context is always stopped —
	 * the old code only reached stop() on normal termination.
	 *
	 * @param javaStreamingContext the context to run
	 */
	@Override
	public void start(JavaStreamingContext javaStreamingContext) {
		javaStreamingContext.start();
		try {
			javaStreamingContext.awaitTermination();
		} catch (InterruptedException e) {
			// Re-interrupt rather than swallow, per Java concurrency convention.
			Thread.currentThread().interrupt();
		} finally {
			// Release the context even when interrupted.
			javaStreamingContext.stop();
		}
	}
}
