package hook;


/**
 * 
 * @author wyl
 * @date 2020/12/09
 */

import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.plan.HiveOperation;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.io.IOException;
import java.io.InputStream;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;


public class CustomHook implements ExecuteWithHookContext {



	/** 存储Hive的SQL操作类型 {@value} */
	private static final HashSet<String> OPERATION_NAMES = new HashSet<String>();

	// HiveOperation是一个枚举类，封装了Hive的SQL操作类型
	// 监控SQL操作类型
	static {
		// 建表
		OPERATION_NAMES.add(HiveOperation.CREATETABLE.getOperationName());
		// 修改数据库属性
		OPERATION_NAMES.add(HiveOperation.ALTERDATABASE.getOperationName());
		// 修改数据库属主
		OPERATION_NAMES.add(HiveOperation.ALTERDATABASE_OWNER.getOperationName());
		// 修改表属性,添加列
		OPERATION_NAMES.add(HiveOperation.ALTERTABLE_ADDCOLS.getOperationName());
		// 修改表属性,表存储路径
		OPERATION_NAMES.add(HiveOperation.ALTERTABLE_LOCATION.getOperationName());
		// 修改表属性
		OPERATION_NAMES.add(HiveOperation.ALTERTABLE_PROPERTIES.getOperationName());
		// 表重命名
		OPERATION_NAMES.add(HiveOperation.ALTERTABLE_RENAME.getOperationName());
		// 列重命名
		OPERATION_NAMES.add(HiveOperation.ALTERTABLE_RENAMECOL.getOperationName());
		// 更新列,先删除当前的列,然后加入新的列
		OPERATION_NAMES.add(HiveOperation.ALTERTABLE_REPLACECOLS.getOperationName());
		// 创建数据库
		OPERATION_NAMES.add(HiveOperation.CREATEDATABASE.getOperationName());
		// 删除数据库
		OPERATION_NAMES.add(HiveOperation.DROPDATABASE.getOperationName());
		// 删除表
		OPERATION_NAMES.add(HiveOperation.DROPTABLE.getOperationName());
		// 查询
		OPERATION_NAMES.add(HiveOperation.QUERY.getOperationName());
	}

	public void run(HookContext hookContext) throws Exception {
		Properties config = new Properties();

		try {
			config.load(Producer.class.getClassLoader().getResourceAsStream("application.properties"));
		} catch (IOException e) {
			e.printStackTrace();
		}

		// 执行计划
		QueryPlan plan = hookContext.getQueryPlan();
		// 操作名称
		String operationName = plan.getOperationName();


		if (OPERATION_NAMES.contains(operationName) && !plan.isExplain()) {
			System.err.println("监控SQL操作");
			ProducerRecord<String, String> record = null;
			Properties configs = Producer.initConfig();
			KafkaProducer<String, String> producer = new KafkaProducer<String, String>(configs);

			switch (hookContext.getHookType()) {
			case POST_EXEC_HOOK:
				record = new ProducerRecord<String, String>(config.getProperty("datasource.kafka.post.topic"),
						plan.getQueryId(), plan.getQueryString());
				break;
			case PRE_EXEC_HOOK:
				record = new ProducerRecord<String, String>(config.getProperty("datasource.kafka.pre.topic"),
						plan.getQueryId(), plan.getQueryString());
				break;
			case ON_FAILURE_HOOK:
				record = new ProducerRecord<String, String>(config.getProperty("datasource.kafka.failure.topic"),
						plan.getQueryId(), plan.getQueryString());
				break;
			default:
				record = new ProducerRecord<String, String>(config.getProperty("datasource.kafka.pre.topic"),
						plan.getQueryId(), plan.getQueryString());
			}

			Producer producerThread = new Producer(producer, record);
			producerThread.start();

		} else {
			System.err.println("不在监控范围，忽略该hook!");
		}

	}
}
