package com.run.wz.center.hbase;

import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.TreeSet;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTableInterfaceFactory;
import org.apache.hadoop.hbase.client.HTablePool;
import org.quartz.CronScheduleBuilder;
import org.quartz.CronTrigger;
import org.quartz.JobBuilder;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SchedulerFactory;
import org.quartz.TriggerBuilder;
import org.quartz.impl.StdSchedulerFactory;

import com.run.wz.center.utils.ProtocolType;


/**
 * Reads configuration parameters and initializes the table list and the
 * HTablePools, then starts scheduled background tasks that track thread-pool
 * state and refresh the table list / HTablePools.
 *
 * All state is static; call {@link #init()} (or {@link #setProps(Properties)}
 * followed by {@link #initWithProps()}) once before using the getters, and
 * {@link #close()} on shutdown.
 *
 * @author chenxu
 * @date 2012-6-30
 */
public class HBaseBatchUtil {
	
	private static Log logger = LogFactory.getLog(HBaseBatchUtil.class);

	/**
	 * Thread pool used to execute per-table SCAN work.
	 */
	private static ThreadPoolExecutor scanExecutorService = null;

	/**
	 * Thread pool used to execute per-table GET work. Also handed to the
	 * HTable instances created for the get pool (see initGetHTablePool).
	 */
	private static ThreadPoolExecutor getExecutorService = null;

	/**
	 * Compute-type thread pool: derives the target tables and the Scan/Get
	 * objects from the input conditions and groups them per table.
	 */
	private static ExecutorService computeExecutorService = null;
	
	
	public static final char[] COLUMN_NAME = {'0','1','2','3','4','5','6','7','8','9','A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U' ,'V'};
	public static final String ACTIONLOG_T_NAME_PREFIX = "actionlog";
	public static final String DEFAULT_KEYCONTACT = "\u0001";
	public static final String DEFAULT_DATEFORMAT = "yyyyMM";
	public static final String DEFAULT_CONTACT = "_";
	public static final ProtocolType[] PROTOCOL_TYPES = ProtocolType.values();
	public static final TreeSet<ProtocolType> PROTOCOL_TYPESET = new TreeSet<ProtocolType>();
	static {
		for(ProtocolType pt : PROTOCOL_TYPES) {
			PROTOCOL_TYPESET.add(pt);
		}
	}
	
	/**
	 * Timeout ratio for a single getList batch: its timeout is the whole
	 * table's timeout multiplied by this factor.
	 */
	public static final double ONE_GETLIST_TIMEOUT_RATIO = 0.8;
	/**
	 * Default timeout: 10 seconds (milliseconds).
	 */
	public static final long DEFAULT_TIMEOUT = 10000;
	/**
	 * Timeout for a single table's Get operation (default 10 seconds).
	 */
	public static final long ONE_T_GET_TIMEOUT = 10000;
	public static final TimeUnit DEFAULT_TO_TIME_UNIT = TimeUnit.MILLISECONDS;

	// Table descriptors keyed by table name; refreshed by initTables_().
	private static Map<String, HTableDescriptor> htds = Collections.synchronizedMap(new HashMap<String, HTableDescriptor>());

	// Per-table HTablePools, one map for scans and one for gets.
	private static Map<String, HTablePool> scanHtablePools = Collections.synchronizedMap(new HashMap<String, HTablePool>());
	private static Map<String, HTablePool> getHtablePools = Collections.synchronizedMap(new HashMap<String, HTablePool>());
	private static HBaseAdmin hbaseAdmin;
	private static Configuration conf;
	private static Properties props;

	public static Properties getProps() {
		return props;
	}

	private static int htablePoolSize;
	private static int scanPoolSize;
	private static int getPoolSize;
	private static int computePoolSize;
	
	public static ExecutorService getGetES() {
		return getExecutorService;
	}
	public static ExecutorService getScanES() {
		return scanExecutorService;
	}
	public static ExecutorService getComputeES() {
		return computeExecutorService;
	}

	/**
	 * Initialization entry point: loads baseconf.properties (unless props was
	 * set beforehand), builds the HBase client Configuration, creates the
	 * thread pools, the table list and the HTablePools, and starts the
	 * Quartz scheduler.
	 *
	 * @throws IOException if the properties cannot be loaded or HBase is unreachable
	 * @throws SchedulerException if the Quartz scheduler fails to start
	 */
	public static void init() throws IOException, SchedulerException {
		
		if(null==props){
			InputStream resourceAsStream = HBaseBatchUtil.class.getClassLoader().getResourceAsStream("baseconf.properties");
			if(null==resourceAsStream)
				throw new NullPointerException("conf file [baseconf.properties] not found");
			try {
				props = new Properties();
				props.load(resourceAsStream);
			} finally {
				// FIX: the stream was previously never closed (resource leak).
				resourceAsStream.close();
			}
		}
		
		isTest = Boolean.valueOf(props.getProperty("isTest", "false"));
		
		conf = HBaseConfiguration.create();
		String quorum = props.getProperty("hbase.zookeeper.quorum");
		if(null==quorum)
			// FIX: Configuration.set(key, null) fails with a cryptic error;
			// fail fast with a clear message instead.
			throw new NullPointerException("property [hbase.zookeeper.quorum] is required");
		conf.set("hbase.zookeeper.quorum", quorum);
		conf.set("hbase.zookeeper.property.clientPort", props.getProperty("hbase.zookeeper.property.clientPort","2181"));
		conf.set(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, props.getProperty("hbase.client.ipc.pool.size", "2"));
		
		conf.set("hbase.ipc.client.connection.maxidletime", "2000");//close the connection after 2 seconds idle
		conf.set("hbase.rpc.timeout", props.getProperty("hbase.rpc.timeout","5000"));
		conf.set("ipc.ping.interval", "2000");//interval between pings sent to the server
		/*
		 * Client retry interval and retry count,
		 * see HConnectionImplementation#getRegionServerWithRetries.
		 */
		conf.set("hbase.client.pause", "30");//retry interval
		conf.set("hbase.client.retries.number","1");//retry count, see HConnectionImplementation#getRegionServerWithRetries
		
		conf.set("hbase.ipc.client.tcpnodelay","true");
		conf.set("hbase.client.prefetch.limit", "128");
		hbaseAdmin = new HBaseAdmin(conf);
		
		
		scanPoolSize = Integer.parseInt(props.getProperty("scan.threadpool.size", "200"));
		getPoolSize = Integer.parseInt(props.getProperty("get.threadpool.size", "200"));
		htablePoolSize = Integer.parseInt(props.getProperty("htablepool.size", String.valueOf(Math.max(scanPoolSize, getPoolSize))));
		computePoolSize = Integer.parseInt(props.getProperty("compute.threadpool.size", "10"));
		initESs();
		//initialize the table list
		initTables();
		//initialize the HTablePools from the freshly-built table list
		initHTablePool();
		SchedulerFactory schedFact=new StdSchedulerFactory();
		scheduler = schedFact.getScheduler();
		scheduler.start();
		
		//start the periodically-running threads
		startThreads();
	
	}
	
	/**
	 * Alternative initialization entry point for callers that set props
	 * manually first (i.e. it guarantees props is non-null before init).
	 *
	 * @throws IOException see {@link #init()}
	 * @throws SchedulerException see {@link #init()}
	 */
	public static void initWithProps() throws IOException, SchedulerException {
		init();
	}

	public static void setProps(Properties props) {
		HBaseBatchUtil.props = props;
	}

	// All table descriptors returned by HBaseAdmin.listTables(); refreshed by initTables_().
	public static HTableDescriptor[] tableArrays;
	// Test-mode switch, read from the "isTest" property.
	public static Boolean isTest;
	private static Scheduler scheduler;
	
	/**
	 * Returns the HTableDescriptor for the given table name, or null if unknown.
	 * @param tableName HBase table name
	 * @return the cached descriptor, or null
	 * @see HTableDescriptor
	 */
	public static HTableDescriptor getHTD(String tableName) {

		return  htds.get(tableName);
	
	}
	
	/**
	 * Borrows an HTable from the scan pool of the given table.
	 * NOTE(review): throws NullPointerException if the table has no pool —
	 * same behavior as the original; callers must pass a known table name.
	 * @param tableName HBase table name
	 * @return a pooled HTable (close() it to return it to the pool)
	 */
	public static HTable getScanHTable(String tableName) {

		return (HTable)scanHtablePools.get(tableName).getTable(tableName);
		
	}
	
	/**
	 * Borrows an HTable from the get pool of the given table.
	 * @param tableName HBase table name
	 * @return a pooled HTable (close() it to return it to the pool)
	 */
	public static HTable getGetHTable(String tableName) {

		return (HTable)getHtablePools.get(tableName).getTable(tableName);
		
	}
	
	private static void initTables() {
		initTables_();
	}
	
	/**
	 * Builds both the scan and the get HTablePools for every known table.
	 * @throws IOException if pre-warming the region caches fails
	 */
	public static void initHTablePool() throws IOException {
		initScanHTablePool();
		initGetHTablePool();
	}
	
	/**
	 * Builds one scan HTablePool per table and fills it up to htablePoolSize
	 * by borrowing and returning tables. Exits the JVM on failure, as a
	 * half-initialized pool set is unusable.
	 */
	private static void initScanHTablePool() throws IOException {

		for(HTableDescriptor htd : tableArrays) {
			try {
				
				HTablePool htablePool = new HTablePool(conf,htablePoolSize);
				// Borrow htablePoolSize tables at once so the pool is fully
				// populated, then return them all.
				HTableInterface[] tempArr = new HTableInterface[htablePoolSize];
				for(int i=0; i<htablePoolSize; i++) {
					HTable table = (HTable)htablePool.getTable(htd.getName());
					tempArr[i] = table;
				}
				logger.info("init table["+htd.getNameAsString()+"]'s scan pool");
				for(HTableInterface h : tempArr) {
					h.close();
				}
				scanHtablePools.put(htd.getNameAsString(), htablePool);
			} catch(Exception e) {
				logger.error("init table["+htd.getNameAsString()+"]'s scan pool error so system exist", e);
				System.exit(1);
			}
		}
		
	}
	
	/**
	 * Builds one get HTablePool per table. Pre-warms the region caches of
	 * .META., -ROOT- and each user table, and wires the get thread pool into
	 * every HTable the pool creates.
	 */
	private static void initGetHTablePool() throws IOException {
		
		// Pre-warm the .META. region cache.
		HTable metatable = new HTable(conf,HConstants.META_TABLE_NAME);
		 Map<HRegionInfo, HServerAddress> metaregions = metatable.getRegionsInfo();
		 metatable.clearRegionCache();
		 metatable.prewarmRegionCache(metaregions);
		 metatable.close();
		 
		 // Pre-warm the -ROOT- region cache.
		 HTable roottable = new HTable(conf,HConstants.ROOT_TABLE_NAME);
		 Map<HRegionInfo, HServerAddress> rootregions = roottable.getRegionsInfo();
		 roottable.clearRegionCache();
		 roottable.prewarmRegionCache(rootregions);
		 roottable.close();

		for(HTableDescriptor htd : tableArrays) {
			
			// Custom factory so every pooled HTable shares getExecutorService.
			HTablePool htablePool = new HTablePool(conf,htablePoolSize, new HTableInterfaceFactory() {
				
				@Override
				public void releaseHTableInterface(HTableInterface table)
						throws IOException {
					table.close();
				}
				
				@Override
				public HTableInterface createHTableInterface(Configuration config,
						byte[] tableName) {
					try {
					      return new HTable(tableName, HConnectionManager.getConnection(config),getExecutorService);
					} catch (IOException ioe) {
					      throw new RuntimeException(ioe);
				    }
				}
			});
			HTable table = (HTable)htablePool.getTable(htd.getName());
			//prewarm the user table's region cache
			Map<HRegionInfo, HServerAddress> regionsInfo = table.getRegionsInfo();
			table.clearRegionCache();
			table.prewarmRegionCache(regionsInfo);
			table.close();
			
			// One table is already in the pool from the prewarm above, so
			// borrow htablePoolSize-1 more to fill it, then return them all.
			HTableInterface[] tempArr = new HTableInterface[htablePoolSize-1];
			for(int i=0; i<tempArr.length; i++) {
				HTable tableInner = (HTable)htablePool.getTable(htd.getName());
				tempArr[i] = tableInner;
				
			}
			logger.info("init table["+htd.getNameAsString()+"]'s get pool");
			for(HTableInterface h : tempArr) {
				h.close();
			}
			getHtablePools.put(htd.getNameAsString(), htablePool);
		}
		
	}

	/**
	 * Initializes the three ExecutorServices (get, scan, compute). Worker
	 * threads are named after the initializing thread and run at max priority.
	 */
	private static void initESs() {

		// FIX: the original computed the same current-thread name into three
		// separate variables; one is enough.
		final String parentName = Thread.currentThread().getName();
		
		getExecutorService = new ThreadPoolExecutor(Integer.parseInt(props.getProperty("get.threadpool.size.core", "200")),Integer.parseInt(props.getProperty("get.threadpool.size.max", "200")),Integer.parseInt(props.getProperty("threadpool.alivetime", "200")),TimeUnit.SECONDS,new ArrayBlockingQueue<Runnable>(Integer.parseInt(props.getProperty("get.threadpool.size.queue", "200"))), new ThreadFactory() {
			
			@Override
			public Thread newThread(Runnable r) {
				Thread t = new Thread(r);
				t.setName(parentName + "-Get-" + System.currentTimeMillis());
				t.setPriority(Thread.MAX_PRIORITY);
				return t;
			}
			
		});
		
		scanExecutorService = new ThreadPoolExecutor(Integer.parseInt(props.getProperty("scan.threadpool.size.core", "200")),Integer.parseInt(props.getProperty("scan.threadpool.size.max", "200")),Integer.parseInt(props.getProperty("threadpool.alivetime", "200")),TimeUnit.SECONDS,new ArrayBlockingQueue<Runnable>(Integer.parseInt(props.getProperty("scan.threadpool.size.queue", "200"))),new ThreadFactory() {
			
			@Override
			public Thread newThread(Runnable r) {
				Thread t = new Thread(r);
				t.setName(parentName + "-Scan-" + System.currentTimeMillis());
				t.setPriority(Thread.MAX_PRIORITY);
				return t;
			}
			
		});
		
		computeExecutorService = Executors.newFixedThreadPool(computePoolSize, new ThreadFactory() {

			@Override
			public Thread newThread(Runnable r) {
				Thread t = new Thread(r);
				t.setName(parentName + "-Compute-" + System.currentTimeMillis());
				t.setPriority(Thread.MAX_PRIORITY);
				return t;
			}
			
		});
	}
	
	/**
	 * Starts the scheduled jobs. Currently fully commented out: the
	 * ListTables refresh job (daily at 07:05) is disabled.
	 */
	private static void startThreads() throws SchedulerException {/*
		
		//runs once every morning at 07:05
		CronTrigger listTableTrigger = TriggerBuilder.newTrigger().
				withIdentity("ListTables job").withSchedule(
		CronScheduleBuilder.cronSchedule("0 5 7 * * ?")).build();
		
		scheduler.scheduleJob(JobBuilder.newJob(new GetListTablesJob().getClass())
				.withIdentity("ListTables job").build(), listTableTrigger);
	*/}
	
	/**
	 * Refreshes tableArrays and the htds map from HBaseAdmin.listTables(),
	 * waking any threads waiting on htds. IOExceptions are logged, not thrown.
	 */
	public static  void initTables_() {

		try {
			tableArrays = hbaseAdmin.listTables();
			if(null!=tableArrays) {
				synchronized(htds) {
					for(HTableDescriptor htd : tableArrays) {
						htds.put(htd.getNameAsString(), htd);
					}
					htds.notifyAll();
				}
				// FIX: this log line previously ran outside the null check and
				// could throw NullPointerException, escaping the IOException catch.
				logger.info("tableNumber ["+tableArrays.length+"]");
			}
		} catch (IOException e) {
			logger.error("listTable error", e); 
		}

	}

	/**
	 * Shuts down all resources: the Quartz scheduler, every scan/get
	 * HTablePool, and the three ExecutorServices.
	 * @throws SchedulerException if the scheduler fails to shut down
	 */
	public static void close() throws SchedulerException {
		
		scheduler.shutdown();
		
		logger.info("closing scan htable pool");
//		close the scan HTablePools
		for(Entry<String, HTablePool> entry : scanHtablePools.entrySet()) {
			String tableName = entry.getKey();
			HTablePool tablePool = entry.getValue();
			try {
				tablePool.close();
				logger.info("close table["+tableName+"]'s pool");
			} catch (IOException e) {
				logger.error("close table["+tableName+"]'s pool error", e);
			}
		}
		logger.info("closed all scan htable pools");
		
		logger.info("closing get htable pool");
//		close the get HTablePools
		for(Entry<String, HTablePool> entry : getHtablePools.entrySet()) {
			String tableName = entry.getKey();
			HTablePool tablePool = entry.getValue();
			try {
				tablePool.close();
				logger.info("close table["+tableName+"]'s pool");
			} catch (IOException e) {
				logger.error("close table["+tableName+"]'s pool error", e);
			}
		}
		logger.info("closed all get htable pools");
		
		logger.info("shutting down getExecutorService");
		getExecutorService.shutdown();
		logger.info("shut down getExecutorService");
		
		logger.info("shutting down scanExecutorService");
		scanExecutorService.shutdown();
		logger.info("shut down scanExecutorService");
		
		logger.info("shutting down computeExecutorService");
		computeExecutorService.shutdown();
		logger.info("shut down computeExecutorService");
		
	}
	
}
