package com.teradata.bigdata.sink.app;

import java.io.IOException;

import com.teradata.bigdata.sink.bean.HttpVo;
import com.teradata.bigdata.sink.conf.ConfigurationManager;
import com.teradata.bigdata.sink.constant.Constants;
import com.teradata.bigdata.sink.pool.hbase.HbaseConnectionPool;
import com.teradata.bigdata.sink.pool.tool.ConnectionPoolConfig;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.sql.ForeachWriter;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


import scala.Serializable;

/**
 * Spark Structured Streaming {@link ForeachWriter} that parses pipe-delimited
 * HTTP signaling records and persists them to HBase.
 *
 * <p>A single process-wide {@link HbaseConnectionPool} is created in the static
 * initializer. {@link #open(long, long)} borrows one {@link Connection} per
 * partition, {@link #process(Row)} writes each record, and
 * {@link #close(Throwable)} returns the connection to the pool.
 *
 * <p>Rowkey layout: {@code imsi + procedure_start_time}; all 33 projected
 * fields are stored comma-joined in a single cell.
 */
public class ForeachWriterHBase extends ForeachWriter<Row> implements
		Serializable {
	public static final long serialVersionUID = 1L;
	private static final Logger logger = LoggerFactory.getLogger(ForeachWriterHBase.class);
	/** Shared, process-wide connection pool; initialized exactly once below. */
	public static HbaseConnectionPool pool = null;
	/** Connection borrowed for the current partition (set in open, released in close). */
	public Connection conn = null;

	static {
		ConnectionPoolConfig config = new ConnectionPoolConfig();
		// Connection-pool sizing parameters.
		config.setMaxTotal(300);
		config.setMaxIdle(100);
		config.setMaxWaitMillis(500);
		config.setTestOnBorrow(true);

		Configuration hbaseConfig = getHBaseConfiguration();
		hbaseConfig.set("hbase.rootdir", "hdfs://nmsq/apps/hbase/data");
		hbaseConfig.set("dfs.client.block.write.replace-datanode-on-failure.policy","ALWAYS");
		hbaseConfig.set("hbase.zookeeper.quorum", "ss-b02-m12-a01-r5300-1,ss-b02-m12-a01-r5300-3,ss-b02-m12-a01-r5300-4,ss-b02-m12-a01-r5300-5,ss-b02-m12-a01-r5300-6");
		// NOTE(security): access credentials are hard-coded in source; they
		// should be moved to externalized, access-controlled configuration.
		hbaseConfig.set("hadoop.security.bdoc.access.id", "5754f80986433f4cb8de");
		hbaseConfig.set("hadoop.security.bdoc.access.key", "d76256523eb684f3f814");
		hbaseConfig.set("hbase.client.start.log.errors.counter", "1");
		hbaseConfig.set("hbase.client.retries.number", "1");
		hbaseConfig.set("zookeeper.znode.parent", "/hbase-unsecure");
		hbaseConfig.set("hbase.zookeeper.property.clientPort", "2181");
		hbaseConfig.set("hbase.client.pause", "1000");
		hbaseConfig.set("hbase.rpc.timeout", "12000");
		hbaseConfig.set("hbase.client.operation.timeout", "60000");
		hbaseConfig.set("hbase.client.scanner.timeout.period", "10000");
		hbaseConfig.set("hbase.client.write.buffer", "6291456");
		hbaseConfig.set("hbase.zookeeper.property.maxClientCnxns", "1000");
		hbaseConfig.set("hbase.regionserver.handler.count", "30000");
		pool = new HbaseConnectionPool(config, hbaseConfig);
	}

	/**
	 * Borrows a connection from the shared pool.
	 *
	 * @return a pooled HBase {@link Connection}
	 */
	public static synchronized Connection getConn() {
		return pool.getConnection();
	}

	/**
	 * Opens the writer for one partition/epoch by borrowing a pooled connection.
	 *
	 * @return {@code true} when a connection was obtained; {@code false} tells
	 *         Spark to skip this partition's rows
	 */
	@Override
	public boolean open(long partitionId, long version) {
		try {
			conn = getConn();
			return true;
		} catch (Exception e) {
			logger.error("Failed to borrow HBase connection (partitionId=" + partitionId
					+ ", version=" + version + ")", e);
			// conn is null unless getConn() partially succeeded; only return
			// a real handle to the pool.
			if (conn != null) {
				pool.returnConnection(conn);
				conn = null;
			}
			return false;
		}
	}

	/**
	 * Parses one pipe-delimited row and writes it to HBase.
	 *
	 * <p>Rows that do not split into exactly 98 fields are silently skipped
	 * (same contract as the original guard). Failures are logged with their
	 * stack traces but do not abort the stream.
	 */
	@Override
	public void process(Row value) {
		GenericRowWithSchema genericRowWithSchema = (GenericRowWithSchema) value;

		String valueStr = genericRowWithSchema.get(0).toString();

		String[] arr = valueStr.split("\\|", -1);
		if (arr.length != 98) {
			return; // malformed record: wrong field count, skip it
		}
		HttpVo httpVo = new HttpVo(arr[0],arr[1],arr[2],arr[3],arr[4],arr[5],arr[6],arr[7],arr[8],arr[9],arr[10],arr[11],arr[12],arr[13],arr[14],arr[15],arr[16],arr[17],arr[18],arr[19],arr[20],arr[21],arr[22],arr[23],arr[24],arr[25],arr[26],arr[27],arr[28],arr[29],arr[30],arr[31],arr[32],arr[33],arr[34],arr[35],arr[36],arr[37],arr[38],arr[39],arr[40],arr[41],arr[42],arr[43],arr[44],arr[45],arr[46],arr[47],arr[48],arr[49],arr[50],arr[51],arr[52],arr[53],arr[54],arr[55],arr[56],arr[57],arr[58],arr[59],arr[60],arr[61],arr[62],arr[63],arr[64],arr[65],arr[66],arr[67],arr[68],arr[69],arr[70],arr[71],arr[72],arr[73],arr[74],arr[75],arr[76],arr[77],arr[78],arr[79],arr[80],arr[81],arr[82],arr[83],arr[84],arr[85],arr[86],arr[87],arr[88],arr[89],arr[90],arr[91],arr[92],arr[93],arr[94],arr[95],arr[96],arr[97]);
		// Projection of the 33 fields persisted to the cell, comma-separated.
		String targetStr = httpVo.owner_city + "," +
				httpVo.imsi + "," +
				httpVo.imei + "," +
				httpVo.sgw_ggsn_ip_add + "," +
				httpVo.eci + "," +
				httpVo.apn + "," +
				httpVo.procedure_start_time + "," +
				httpVo.procedure_end_time + "," +
				httpVo.app_type + "," +
				httpVo.app_sub_type + "," +
				httpVo.l4_protocal + "," +
				httpVo.app_server_ip_ipv4 + "," +
				httpVo.app_server_port + "," +
				httpVo.ul_data + "," +
				httpVo.dl_data + "," +
				httpVo.ul_ip_packet + "," +
				httpVo.dl_ip_packet + "," +
				httpVo.ul_disorder_ip_packet + "," +
				httpVo.dl_disorder_ip_packet + "," +
				httpVo.ul_retrans_ip_packet + "," +
				httpVo.dl_retrans_ip_packet + "," +
				httpVo.tcp_response_time + "," +
				httpVo.tcp_ack_time + "," +
				httpVo.first_req_time + "," +
				httpVo.first_response_time + "," +
				httpVo.window_size + "," +
				httpVo.mss + "," +
				httpVo.tcp_syn_num + "," +
				httpVo.tcp_status + "," +
				httpVo.message_type + "," +
				httpVo.message_status + "," +
				httpVo.first_http_response_time + "," +
				httpVo.last_content_packet_time;

		String dbName = "b_yz_app_td_hbase";
		String tableName = "ssthbase";
		// Rowkey = imsi followed by the procedure start time.
		String rowkey = httpVo.imsi + httpVo.procedure_start_time;
		// Column family used for the write.
		String columnFamily = "cf";

		String fullTableName = dbName + ":" + tableName;
		TableName tName = TableName.valueOf(fullTableName);

		// Ensure the target table exists.
		// NOTE(review): an existence check per record is expensive; once the
		// table is known to exist this block could be cached/skipped.
		// NOTE(review): the table is created with family Constants.CF_DEFAULT
		// while puts use "cf" — confirm these are the same family name.
		try (Admin admin = conn.getAdmin()) {
			if (!admin.tableExists(tName)) {
				HTableDescriptor table = new HTableDescriptor(tName);
				table.addFamily(new HColumnDescriptor(Constants.CF_DEFAULT)
						.setCompressionType(Algorithm.NONE));
				try {
					admin.createTable(table);
				} catch (Exception e) {
					logger.error("建表失败： ->" + fullTableName, e);
				}
			}
		} catch (IOException e1) {
			logger.error("获取tablePut或Admin失败： ->" + fullTableName, e1);
		}

		// try-with-resources guarantees the Table handle is closed even when
		// the put fails (the original leaked it on the error path).
		try (Table tablePut = conn.getTable(tName)) {
			Put put = setDataPut(rowkey, columnFamily, targetStr);
			tablePut.put(put);
		} catch (Exception e) {
			logger.error("写入数据失败： ->" + fullTableName + "-" + targetStr, e);
		}
	}

	/**
	 * Returns this partition's connection to the shared pool. {@code conn} may
	 * be null when {@link #open} failed before borrowing succeeded.
	 */
	@Override
	public void close(Throwable errorOrNull) {
		if (conn != null) {
			pool.returnConnection(conn);
			conn = null;
		}
	}

	/**
	 * Builds the base HBase client {@link Configuration} (ZK quorum, version
	 * check skip). May return {@code null} if creation fails — callers in this
	 * class assume success.
	 *
	 * @return the configuration, or {@code null} on failure (logged)
	 */
	public static Configuration getHBaseConfiguration() {
		Configuration conf = null;
		try {
			conf = HBaseConfiguration.create();
			conf.set("hbase.zookeeper.quorum", "ss-b02-m12-a01-r5300-1,ss-b02-m12-a01-r5300-3,ss-b02-m12-a01-r5300-4,ss-b02-m12-a01-r5300-5,ss-b02-m12-a01-r5300-6");
			conf.set("hbase.defaults.for.version.skip", "true");

		} catch (Exception e) {
			logger.error("获取HBaseConfiguration出错，请检查是否有配置文件和ZK是否正常。ZK链接： ->"
					+ ConfigurationManager
							.getProperty(Constants.ZK_METADATA_BROKER_LIST), e);
		}
		return conf;
	}

	/**
	 * Builds a {@link Put} storing {@code tableData} in column "1" of the given
	 * family under the given rowkey.
	 *
	 * @param tableRowkey  rowkey bytes source
	 * @param columnFamily target column family
	 * @param tableData    cell value
	 * @return the populated Put
	 */
	public static Put setDataPut(String tableRowkey, String columnFamily,
			String tableData) {
		Put put = new Put(Bytes.toBytes(tableRowkey));
		put.addColumn(Bytes.toBytes(columnFamily),
				Bytes.toBytes("1"),
				Bytes.toBytes(tableData));
		return put;
	}
}