package com.gdlt.mq2db;

import java.math.BigDecimal;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Created by CM on 2017/3/14.
 */
public class PGWriter extends Thread implements Offsetable, Writable  {

	/** Shared JDBC connection, lazily opened by {@link #getConn()}. */
	Connection connection = null;
	/** Kafka partition this writer is responsible for (read from configuration). */
	private int partitionId;
	/** Number of queued records required before a batch is flushed to the DB. */
	private long minBatchSize;
	/** Last offset value handed to {@link #write(List, long)} from {@link #run()}. */
	private long offset;
	/** Open/closed flag; flips true -> false exactly once in {@link #close()}. */
	private static final AtomicBoolean flag = new AtomicBoolean(true);
	/** Queue of records produced by the Kafka consumer side. */
	private final BlockingQueue<ConsumerRecord<String, String>> queue;

	private final static Logger LOG = LoggerFactory.getLogger(PGWriter.class);

	public PGWriter(BlockingQueue<ConsumerRecord<String, String>> queue) {
		this.queue = queue;
	}

	/**
	 * Lazily opens (and caches) the PostgreSQL connection using the settings in
	 * {@link Kafka2pgConfig}; also loads {@code partitionId} and
	 * {@code minBatchSize} from the same configuration.
	 *
	 * @return the cached connection, or {@code null} if opening it failed
	 */
	private Connection getConn() {
		if (connection == null) {
			Configuration conf = Kafka2pgConfig.getConf();
			String url = conf.get(Kafka2pgConfig.PG_URL_CONF_NAME);
			String user = conf.get(Kafka2pgConfig.PG_USER_CONF_NAME);
			String password = conf.get(Kafka2pgConfig.PG_PASSWORD_CONF_NAME);
			String driver = conf.get(Kafka2pgConfig.PG_DRIVER_CONF_NAME);
			partitionId = conf.getInt(Kafka2pgConfig.PARTIONID_CONF_NAME, Kafka2pgConfig.PARTIONID_DEFAULT_VALUE);
			minBatchSize = conf.getLong(Kafka2pgConfig.BATCHSIZE_CONF_NAME, Kafka2pgConfig.BATCHSIZE_DEFAULT_VALUE);
			try {
				Class.forName(driver); // load the JDBC driver class
				connection = DriverManager.getConnection(url, user, password);
				LOG.info("成功连接pg数据库:" + connection);
			} catch (ClassNotFoundException e) {
				LOG.error("未加载数据库驱动程序", e);
			} catch (SQLException e) {
				LOG.error("连接数据库异常", e);
			}
		}
		return connection;
	}

	/*
	 * (non-Javadoc)
	 * 
	 * @see com.gdlt.mq2db.Offsetable#getLastoffset()
	 */
	/**
	 * Reads the last committed offset for this writer's partition from the
	 * {@code goliaoffset} table.
	 *
	 * @return the stored offset, or -1 if none is recorded or the query failed
	 */
	public long getLastoffset() {
		long lastOffset = -1;
		connection = getConn();
		// Parameterized query: no string-built SQL, and the partition is bound
		// as an int, consistent with how write() inserts it.
		String sql = "select recset from goliaoffset where partition = ?";
		try (PreparedStatement pstmt = connection.prepareStatement(sql)) {
			pstmt.setInt(1, partitionId);
			try (ResultSet rs = pstmt.executeQuery()) {
				// Guard against a row with a NULL recset value.
				if (rs.next() && rs.getObject(1) != null) {
					lastOffset = Long.parseLong(rs.getString(1));
				}
			}
			LOG.info("读取数据库中offset:" + lastOffset);
		} catch (SQLException e) {
			LOG.error("读取offset异常", e);
		}
		return lastOffset;
	}

	/*
	 * (non-Javadoc)
	 * 
	 * @see com.gdlt.mq2db.Writable#write(java.util.List, long)
	 */
	/**
	 * Writes one batch of records to the {@code goliastream} table and the
	 * offset of the last record to the {@code goliaoffset} table in a single
	 * transaction; rolls back on any SQL failure.
	 *
	 * <p>Each record value is expected to look like
	 * {@code "<prefix>.<ip_with_underscores><index> <value> <ts>"} (space
	 * separated) — TODO confirm against the producer.
	 *
	 * @param data   batch of Kafka records to persist; ignored if null/empty
	 * @param offset unused; the offset of the last record in {@code data} is
	 *               stored instead (kept for interface compatibility)
	 */
	public void write(List<ConsumerRecord<String, String>> data, long offset) {
		if (data == null || data.isEmpty()) {
			return; // nothing to persist; also avoids data.get(-1) below
		}
		long start = System.currentTimeMillis();
		connection = getConn();

		String sql = "insert into goliastream(ip,index,value,ts) values(?,?,?,?)";
		// Upsert: one offset row per partition, updated in place.
		String sqlSet = "insert into goliaoffset(recset,partition) values(?,?)"
				+ "on conflict(partition) do update set recset = ? ";
		try {
			connection.setAutoCommit(false);
			// try-with-resources: statements are closed even when an
			// exception aborts the batch.
			try (PreparedStatement pstmt = connection.prepareStatement(sql);
					PreparedStatement pstmtSet = connection.prepareStatement(sqlSet)) {
				for (ConsumerRecord<String, String> record : data) {
					// Split "<key> <value> <ts>" on blanks, then the key on dots.
					String recValue = record.value();
					String[] splitBlank = recValue.split(" ");
					String[] splitDot = splitBlank[0].split("\\.");

					String ip = splitDot[1].replace('_', '.');
					// Index = remainder of the key after "<part0>.<part1>."
					String index = splitBlank[0].substring(splitDot[0].length() + splitDot[1].length() + 2);
					// BigDecimal from String keeps full precision (DB decimal column).
					BigDecimal value = new BigDecimal(splitBlank[1]);
					int ts = Integer.parseInt(splitBlank[2]);

					pstmt.setString(1, ip);
					pstmt.setString(2, index);
					pstmt.setBigDecimal(3, value);
					pstmt.setInt(4, ts);
					pstmt.addBatch();
				}
				pstmt.executeBatch();

				// Persist the offset of the last record in the same transaction.
				String offsetStr = String.valueOf(data.get(data.size() - 1).offset());
				pstmtSet.setString(1, offsetStr);
				pstmtSet.setInt(2, partitionId);
				pstmtSet.setString(3, offsetStr); // value for the DO UPDATE branch
				pstmtSet.executeUpdate();

				connection.commit(); // data + offset committed atomically
			}

			long end = System.currentTimeMillis();
			// ms -> s is a division, not a multiplication.
			LOG.info("写入" + data.size() + "条数据耗时(s):" + ((double) (end - start)) / 1000);
		} catch (SQLException e) {
			try {
				connection.rollback(); // undo the partial batch
				LOG.info("回滚成功");
			} catch (Exception e2) {
				LOG.error("回滚异常： " + e2);
			}
			LOG.error("插入数据库异常: " + e);
		}
	}

	/*
	 * (non-Javadoc)
	 * 
	 * @see com.gdlt.mq2db.Writable#isClosed()
	 */
	public boolean isClosed() {
		return flag.get();
	}

	/*
	 * (non-Javadoc)
	 * 
	 * @see com.gdlt.mq2db.Writable#close()
	 */
	public void close() {
		try {
			// Only the thread that wins the true -> false transition closes
			// the connection, so it is closed at most once.
			if (flag.compareAndSet(true, false) && connection != null) {
				connection.close();
			}
		} catch (SQLException e) {
			LOG.error("关闭数据库连接异常", e);
		}
	}

	/**
	 * Main loop: blocks on the queue, accumulates records, and flushes a batch
	 * once {@code minBatchSize} records are buffered. Exits (re-interrupted)
	 * when the thread is interrupted while waiting.
	 */
	public void run() {
		LOG.info(" PGWriter id is " + Thread.currentThread().getId());
		List<ConsumerRecord<String, String>> data = new ArrayList<ConsumerRecord<String, String>>();
		try {
			while (true) {
				data.add(queue.take()); // blocks until a record is available
				if (data.size() >= minBatchSize) {
					write(data, offset);
					data.clear();
				}
			}
		} catch (InterruptedException e) {
			// Restore the interrupt status so callers can observe it.
			Thread.currentThread().interrupt();
			LOG.error("PGWriter被中断，退出写入循环", e);
		}
	}
}
