package com.binarysoft.service.common.hbase;

import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Hashtable;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import javax.xml.bind.JAXBElement;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

import common.framework.dsb.AbstractServiceBean;
import common.framework.dsb.annotation.DSBService;
import common.framework.dsb.service.ServiceContext;
import common.framework.jaxb.JAXBTool;
import common.framework.log.Logger;

@DSBService(name = "DSB/HBaseService")
public class HBaseServiceBean extends AbstractServiceBean implements HBaseService {

	/**
	 * key: cluster name; value: the parsed configuration of that HBase cluster
	 * (loaded from hbase-clusters.xml). ConcurrentHashMap so that service calls
	 * can read concurrently while start() populates the map.
	 */
	private Map<String, HBaseCluster> hbaseClusters = new ConcurrentHashMap<String, HBaseCluster>();
	/**
	 * key: cluster name; value: the shared client Connection for that cluster.
	 * An HBase Connection is heavyweight and thread-safe, so one per cluster
	 * is kept for the lifetime of the service.
	 */
	private Map<String, Connection> hbaseConnections = new ConcurrentHashMap<String, Connection>();

	/**
	 * Starts the service: loads the cluster definitions from
	 * hbase-clusters.xml and opens one Connection per configured cluster.
	 *
	 * @param serviceContext the DSB service context (provides the config file)
	 * @throws Exception if the configuration is invalid (e.g. duplicated
	 *         cluster names) or a connection cannot be created
	 */
	@Override
	public void start(ServiceContext serviceContext) throws Exception {
		super.start(serviceContext);

		// load clusters configurations from hbase-clusters.xml.
		loadConfiguration();

		// init connections.
		initConnections();
	}

	/**
	 * Health-checks the given cluster by listing its table names through a
	 * short-lived Admin handle. A failed probe is reported as {@code false}
	 * rather than an exception, so callers can use this as a liveness check.
	 *
	 * @param clusterName name of a configured cluster
	 * @return true if the cluster answered the probe, false otherwise
	 * @throws Exception if no such cluster / connection is configured
	 */
	@Override
	public boolean isAvailable(String clusterName) throws Exception {
		HBaseCluster hbaseCluster = hbaseClusters.get(clusterName);
		if (hbaseCluster == null) {
			throw new Exception("HBase cluster: " + clusterName + " not found!");
		}

		Connection conn = hbaseConnections.get(clusterName);

		if (conn == null) {
			throw new Exception("HBase connection pool:" + clusterName + " not found!");
		}

		boolean result = true;

		// try-with-resources: the Admin handle was previously never closed,
		// leaking one handle per health check.
		try (Admin admin = conn.getAdmin()) {
			// listTableNames() round-trips to the cluster; reaching it at all
			// is the probe — the returned names themselves are not needed.
			admin.listTableNames();
		} catch (Throwable e) {
			Logger.printStackTrace(Logger.FATAL_LEVEL, "", e);
			result = false;
		}

		Logger.log(Logger.FUNCTION_LEVEL, "[" + clusterName + "]" + " isAvailable: " + result);

		return result;
	}

	/**
	 * Returns a Table handle for the given table on the given cluster.
	 * The caller owns the returned Table and is responsible for closing it.
	 *
	 * @param clusterName name of a configured cluster
	 * @param tableName   table name, optionally namespace-qualified
	 * @throws Exception if no connection exists for the cluster
	 */
	@Override
	public Table getHtable(String clusterName, String tableName) throws Exception {

		Logger.log(Logger.FUNCTION_LEVEL, "getHtable[" + clusterName + "][" + tableName + "]");
		Connection conn = hbaseConnections.get(clusterName);

		if (conn == null) {
			throw new Exception("HBase cluster:" + clusterName + " not found!");
		}

		return conn.getTable(TableName.valueOf(tableName));
	}

	/**
	 * Lists the names of all configured clusters.
	 *
	 * @return a defensive copy — previously the live keySet() view of the
	 *         internal map was exposed, letting callers mutate internal state
	 */
	@Override
	public Collection<String> listClusters() throws Exception {
		return new ArrayList<String>(hbaseClusters.keySet());
	}

	/**
	 * Lists all table names (namespace-qualified) on the given cluster.
	 *
	 * @param clusterName name of a configured cluster
	 * @throws Exception if no connection exists for the cluster, or the
	 *         cluster cannot be reached
	 */
	@Override
	public Collection<String> listTableNames(String clusterName) throws Exception {

		Connection conn = hbaseConnections.get(clusterName);

		if (conn == null) {
			throw new Exception("HBase cluster:" + clusterName + " not found!");
		}

		// try-with-resources: Admin was previously leaked on every call.
		try (Admin admin = conn.getAdmin()) {
			TableName[] tableNames = admin.listTableNames();

			Collection<String> names = new ArrayList<String>(tableNames.length);
			for (TableName tname : tableNames) {
				names.add(tname.getNameWithNamespaceInclAsString());
			}

			return names;
		}
	}

	/**
	 * Returns the descriptor of the given table on the given cluster.
	 *
	 * @param clusterName name of a configured cluster
	 * @param tableName   table name, optionally namespace-qualified
	 * @throws Exception if the cluster connection or the table is not found
	 */
	@Override
	public HTableDescriptor getTableDescriptor(String clusterName, String tableName) throws Exception {
		Connection conn = hbaseConnections.get(clusterName);

		if (conn == null) {
			throw new Exception("HBase cluster:" + clusterName + " not found!");
		}

		// try-with-resources: Admin was previously leaked on every call.
		try (Admin admin = conn.getAdmin()) {
			TableName tname = TableName.valueOf(tableName);

			if (!admin.isTableAvailable(tname)) {
				throw new Exception("Table:" + tname + " not found!");
			}

			return admin.getTableDescriptor(tname);
		}
	}

	/**
	 * Reloads configuration at runtime. Intentionally a no-op so far —
	 * clusters are only read once at start().
	 */
	@Override
	public void refresh() throws Exception {

	}

	/**
	 * Closes every cluster connection. Failures to close one connection are
	 * logged and do not prevent the remaining connections from being closed.
	 */
	@Override
	public void close() throws Exception {

		for (Connection conn : hbaseConnections.values()) {
			try {
				conn.close();
			} catch (IOException e) {
				// use the project logger, consistent with the rest of the
				// class, instead of printing to stdout
				Logger.printStackTrace(Logger.FATAL_LEVEL, "", e);
			}
		}
	}

	/**
	 * Parses hbase-clusters.xml (via JAXB) into {@link #hbaseClusters},
	 * rejecting duplicated cluster names.
	 *
	 * @throws Exception on unmarshalling failure or duplicated cluster name
	 */
	private void loadConfiguration() throws Exception {

		Logger.log(Logger.FUNCTION_LEVEL, "loadConfiguration begin..");

		URL configFileURL = serviceContext.getConfigFile("hbase-clusters.xml");
		String packageName = this.getClass().getPackage().getName();

		// JAXBTool.unmarshal returns a raw Object; the root element of
		// hbase-clusters.xml is HBaseClusterList, so this cast is safe.
		@SuppressWarnings("unchecked")
		JAXBElement<HBaseClusterList> jaxbElement = (JAXBElement<HBaseClusterList>) JAXBTool.unmarshal(configFileURL, packageName, this.getClass().getClassLoader());
		HBaseClusterList hbcList = jaxbElement.getValue();

		for (HBaseCluster cluster : hbcList.hbaseCluster) {
			String clusterName = cluster.getClusterName().trim();
			if (hbaseClusters.containsKey(clusterName)) {
				throw new Exception("Error duplicated cluster name: " + clusterName);
			}
			hbaseClusters.put(clusterName, cluster);
		}

		Logger.log(Logger.FUNCTION_LEVEL, "loadConfiguration end..");
	}

	/** Sets one HBase client property on the configuration and logs the effective value. */
	private static void setAndLog(Configuration conf, String key, String value) {
		conf.set(key, value);
		Logger.log(Logger.FUNCTION_LEVEL, key + "=" + value);
	}

	/**
	 * Creates one HBase Connection per configured cluster and stores it in
	 * {@link #hbaseConnections}, keyed by trimmed cluster name.
	 *
	 * @throws Exception if a connection cannot be created
	 */
	private void initConnections() throws Exception {

		for (HBaseCluster cluster : hbaseClusters.values()) {

			Configuration conf = HBaseConfiguration.create();

			setAndLog(conf, "hbase.zookeeper.quorum", cluster.hbaseZookeeperQuorum.trim());
			setAndLog(conf, "zookeeper.znode.parent", cluster.zookeeperZnodeParent.trim());
			setAndLog(conf, "hbase.zookeeper.property.clientPort", cluster.hbaseZookeeperPropertyClientPort.trim());
			setAndLog(conf, "zookeeper.recovery.retry", cluster.zookeeperRecoveryRetry.trim());
			setAndLog(conf, "hbase.client.pause", cluster.hbaseClientPause.trim());
			setAndLog(conf, "hbase.client.retries.number", cluster.hbaseClientRetriesNumber.trim());
			setAndLog(conf, "hbase.rpc.timeout", cluster.hbaseRpcTimeout.trim());
			setAndLog(conf, "hbase.client.operation.timeout", cluster.hbaseClientOperationTimeout.trim());
			setAndLog(conf, "hbase.client.scanner.timeout.period", cluster.hbaseClientScannerTimeoutPeriod.trim());

			Logger.log(Logger.FUNCTION_LEVEL, "createConnection begin: " + cluster.clusterName);
			Connection conn = ConnectionFactory.createConnection(conf);
			Logger.log(Logger.FUNCTION_LEVEL, "createConnection end.");

			hbaseConnections.put(cluster.clusterName.trim(), conn);
		}

	}
}
