package com.gdlt.mq2db;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * Per-partition pipeline driver: reads records from a Kafka partition and
 * writes each batch to both PostgreSQL and a graph store, resuming from the
 * last offset persisted in the database on restart.
 *
 * Created by CM on 2017/3/14.
 */
public class Runner {

	/**
	 * Entry point. Wires up the reader and the two writers (resolved
	 * reflectively from configuration), then loops forever copying Kafka
	 * batches into both sinks until a shutdown hook closes them.
	 *
	 * @param args args[0] = Kafka partition id to consume,
	 *             args[1] = path to the XML configuration file
	 * @throws IOException            if the config file cannot be read/closed
	 *                                (FileNotFoundException is a subclass)
	 * @throws ClassNotFoundException if a configured writer class is missing
	 */
	public static void main(String[] args)
			throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException,
			IllegalArgumentException, InvocationTargetException, NoSuchMethodException, SecurityException {

		int partitionId = Integer.parseInt(args[0]); // partition id and config path come from the CLI
		String configFile = args[1];

		// try-with-resources so the config stream is closed even if parsing fails
		// (the original leaked this FileInputStream).
		try (InputStream inputStream = new FileInputStream(new File(configFile))) {
			Kafka2pgConfig.AddConfig(inputStream);
		}

		Configuration conf = Kafka2pgConfig.getConf();
		String writerClasses = conf.get(Kafka2pgConfig.WRITER_CLASSES);
		// Expected value format: "<pg-writer-class>,<graph-writer-class>".
		String[] writerClassNames = writerClasses.split(",");

		// Instantiate the PG writer reflectively via its (int partitionId) constructor.
		Class<?> pgWriterClass = Class.forName(writerClassNames[0]);
		Constructor<?> pgWriterCtor = pgWriterClass.getConstructor(int.class);
		final PGWriter pgWriter = (PGWriter) pgWriterCtor.newInstance(partitionId);

		// On restart, resume from the last offset persisted in the database.
		long offset = pgWriter.getLastoffset();

		final KafkaReader reader = new KafkaReader(partitionId, offset);

		// The graph writer is instantiated via its no-arg constructor.
		// getDeclaredConstructor().newInstance() replaces the deprecated
		// Class.newInstance(); its exceptions are already declared above.
		Class<?> graphWriterClass = Class.forName(writerClassNames[1]);
		GraphWriter graphWriter = (GraphWriter) graphWriterClass.getDeclaredConstructor().newInstance();

		// Close both ends cleanly on JVM shutdown; this is also what ends the
		// main loop below.
		Runtime.getRuntime().addShutdownHook(new Thread() {
			@Override
			public void run() {
				System.out.println("Shutdown hook ran!");
				pgWriter.close();
				reader.close();
			}
		});

		// FIX: the original condition was `reader.isClosed() && pgWriter.isClosed()`,
		// which is false for a freshly opened reader/writer, so the loop body never
		// ran. Run while both are still open; the shutdown hook flips this.
		while (!reader.isClosed() && !pgWriter.isClosed()) {

			List<ConsumerRecord<String, String>> data = reader.getData();
			offset = reader.getOffset();

			// Retry until BOTH sinks accept this batch. Note the short-circuit:
			// if the PG write fails, the graph write is re-executed on the next
			// pass — assumes writes are idempotent at the same offset.
			// NOTE(review): busy-retries with no backoff — confirm intended.
			while (!graphWriter.write(data, offset) || !pgWriter.write(data, offset)) {
			}
			data.clear();
		}

	}

}
