package cn.kingyea.utils;

import java.io.File;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.util.HashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import parquet.org.slf4j.Logger;
import parquet.org.slf4j.LoggerFactory;

/**
 * Utility for obtaining a shared Hive JDBC connection (discovered through
 * ZooKeeper, optionally Kerberos-secured) and executing simple SQL.
 *
 * <p>The single cached {@link Connection} is initialized lazily under a class
 * lock and reused by all callers; individual statements/result sets are closed
 * per call via try-with-resources.
 */
public class HiveUtils {
	// true: the cluster requires Kerberos (keytab/krb5) login before connecting.
	private static final boolean isSecureVerson = true;

	private static final String HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";

	private static final String ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME = "Client";
	private static final String ZOOKEEPER_SERVER_PRINCIPAL_KEY = "zookeeper.server.principal";
	private static final String ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL = "zookeeper/hadoop";

	private static Configuration CONF = null;
	private static String KRB5_FILE = null;
	private static String USER_NAME = null;
	private static String USER_KEYTAB_FILE = null;
	private static String zkQuorum = null;

	private static final Logger LOG = LoggerFactory.getLogger(HiveUtils.class);

	public HiveUtils() {
	}

	static {
		try {
			CONF = new Configuration();
			// zkQuorum lists the cluster's ZooKeeper nodes; the default client port is 24002.
			zkQuorum = "192.168.0.13:24002,192.168.0.14:24002,192.168.0.15:24002";
			// The cluster user this client authenticates as (created beforehand on the cluster).
			USER_NAME = "hive5";
//			USER_NAME = "admin";

			if (isSecureVerson) {
				// Client keytab and krb5 files are expected under ./conf/ of the working directory.
				String userdir = System.getProperty("user.dir") + File.separator + "conf" + File.separator;
				USER_KEYTAB_FILE = userdir + "user.keytab";
				KRB5_FILE = userdir + "krb5.conf";

				CONF.addResource(new Path(userdir + "core-site.xml"));
				CONF.addResource(new Path(userdir + "hdfs-site.xml"));
				CONF.addResource(new Path(userdir + "hbase-site.xml"));

				LoginUtil.setJaasConf(ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME, USER_NAME, USER_KEYTAB_FILE);
				LoginUtil.setZookeeperServerPrincipal(ZOOKEEPER_SERVER_PRINCIPAL_KEY,
						ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL);

				// Secure mode: Kerberos login must happen before any ZooKeeper access.
				LoginUtil.login(USER_NAME, USER_KEYTAB_FILE, KRB5_FILE, CONF);
			}
		} catch (Exception e) {
			// Keep the full stack trace; without a successful login this class is unusable.
			LOG.error("HiveUtils static initialization failed", e);
		}
	}

	/**
	 * Builds the Hive JDBC URL for ZooKeeper service discovery.
	 *
	 * <p>When using the multi-instance feature, the "hiveserver2" namespace and
	 * the "hive/hadoop.hadoop.com@HADOOP.COM" principal must be adjusted per
	 * instance (e.g. "hiveserver2_1" with "hive1/hadoop.hadoop.com@HADOOP.COM"
	 * for the Hive1 instance, "hiveserver2_2" for Hive2, and so on).
	 *
	 * @return the assembled JDBC URL
	 */
	private static String getUrl() {
		StringBuilder sBuilder = new StringBuilder("jdbc:hive2://").append(zkQuorum).append("/");

		if (isSecureVerson) {
			// Secure mode: Kerberos auth with confidentiality-protected SASL QOP.
			sBuilder.append(";serviceDiscoveryMode=").append("zooKeeper").append(";zooKeeperNamespace=")
					.append("hiveserver2;sasl.qop=auth-conf;auth=KERBEROS;principal=hive/hadoop.hadoop.com@HADOOP.COM")
					.append(";");
		} else {
			// Normal (insecure) mode; namespace changes for multi-instance as in secure mode.
			sBuilder.append(";serviceDiscoveryMode=").append("zooKeeper").append(";zooKeeperNamespace=")
					.append("hiveserver2;auth=none;");
		}
		return sBuilder.toString();
	}

	// Shared connection, lazily created and guarded by the HiveUtils.class lock.
	private static Connection connection = null;

	/**
	 * Returns the shared connection, (re)creating it if it was never opened or
	 * has since been closed.
	 *
	 * @return the shared {@link Connection}, or {@code null} if it could not be opened
	 */
	private static Connection getConnection() {
		try {
			synchronized (HiveUtils.class) {
				// Re-open if never created or already closed (stale cache was a leak source).
				if (connection == null || connection.isClosed()) {
					Class.forName(HIVE_DRIVER);
					connection = DriverManager.getConnection(getUrl(), "", "");
				}
				return connection;
			}
		} catch (Exception e) {
			LOG.error("Failed to obtain Hive connection", e);
		}
		return null;
	}

	/**
	 * Executes a query and returns its columns as a map of lower-cased column
	 * label to string value.
	 *
	 * <p>NOTE(review): every fetched row overwrites the previous one, so only
	 * the LAST row survives — this is only meaningful for single-row queries;
	 * confirm with callers before relying on multi-row results.
	 *
	 * @param sql the query to execute
	 * @return a (possibly empty) {@code HashMap<String,String>}, or {@code null} on error
	 */
	public static Object executeQuery(String sql) {
		Connection conn = getConnection();
		// try-with-resources closes both statement and result set (previously leaked).
		try (PreparedStatement ps = conn.prepareStatement(sql);
				ResultSet rs = ps.executeQuery()) {
			ResultSetMetaData meta = rs.getMetaData();
			int columnCount = meta.getColumnCount();
			HashMap<String, String> map = new HashMap<String, String>();
			while (rs.next()) {
				for (int i = 1; i <= columnCount; i++) {
					String labelName = meta.getColumnLabel(i);
					map.put(labelName.toLowerCase(), rs.getString(i));
				}
			}
			return map;
		} catch (Exception e) {
			LOG.error("executeQuery failed for sql: {}", sql, e);
		}
		return null;
	}

	/**
	 * Executes a DDL/DML statement.
	 *
	 * @param sql the statement to execute
	 * @return the {@link Boolean} result of {@link PreparedStatement#execute}
	 *         (true if the statement produced a result set), or {@code null} on error
	 */
	public static Object executeUpdate(String sql) {
		try {
			// NOTE(review): a guard against "drop" statements was started here but
			// deliberately left disabled — confirm whether it should be enforced.
			if (null != sql && sql.contains("drop")) {
//				return null;
			}
			Connection conn = getConnection();
			// try-with-resources closes the statement (previously leaked).
			try (PreparedStatement ps = conn.prepareStatement(sql)) {
				return ps.execute();
			}
		} catch (Exception e) {
			LOG.error("executeUpdate failed for sql: {}", sql, e);
		}
		return null;
	}
}
