/**
 * File header for the real-time stream server tools package.
 *
 * @description Real-time stream processing entry-point tooling
 * @author [xuwei3]
 * @version [2014-11-4]
 * @see [related classes/methods]
 * @since [product/module version]
 */
package com.ailk.bigdata.etl.realstream.server.tools;

import io.netty.channel.ChannelHandlerContext;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.LinkedBlockingQueue;

import org.apache.mina.util.ConcurrentHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.ailk.bigdata.etl.common.server.constant.Constant;
import com.ailk.bigdata.etl.common.server.constant.OSInfo;
import com.ailk.bigdata.etl.common.server.log.LogBackWatchdogFactory;
import com.ailk.bigdata.etl.common.server.model.DatabaseInfo;
import com.ailk.bigdata.etl.realstream.server.dao.RealStreamUnitDao;
import com.ailk.bigdata.etl.realstream.server.dao.impl.RealStreamUnitDaoPGImpl;
import com.ailk.bigdata.etl.realstream.server.model.KafkaProduceException;
import com.ailk.bigdata.etl.realstream.server.model.RealDataDispatcher;
import com.ailk.bigdata.etl.realstream.server.model.RealStreamDataPack;
import com.ailk.bigdata.etl.realstream.server.model.RealStreamUnit;
import com.ailk.bigdata.etl.realstream.server.model.RealStreamUser;
import com.ailk.bigdata.etl.realstream.server.model.ValueEvent;
import com.ailk.bigdata.etl.realstream.server.monitor.RealEventCounter;
import com.ailk.bigdata.etl.realstream.server.service.DataDistributeService;
import com.ailk.bigdata.etl.realstream.server.service.FormatHandler;
import com.ailk.bigdata.etl.realstream.server.service.impl.DataDistributeServiceImpl;
import com.ailk.bigdata.etl.realstream.server.service.impl.RollingFileWriter;
import com.lmax.disruptor.RingBuffer;

/**
 * Main entry point of the real-time stream processing server.
 *
 * <p>Holds the process-wide shared caches used by the networking, dispatch,
 * Kafka-forwarding and file-backup subsystems, validates the command-line
 * rolling-file configuration and bootstraps all worker threads.
 *
 * <p>Thread-safety note: the public static caches below are accessed from
 * multiple worker threads, hence the concurrent collection types.
 *
 * @author [xuwei3]
 * @version [2014-11-4]
 */
public class RealStreamTool
{
	private static final Logger logger = LoggerFactory.getLogger(RealStreamTool.class);
	private static RealStreamTool tool = new RealStreamTool();
	public static ConcurrentHashMap<String, Integer> USER_HEART_COUNT = new ConcurrentHashMap<String, Integer>();// heartbeat counts per command-port client
	public static ConcurrentHashMap<String, RealStreamUser> AUTHENTICATION_INFO_CACHE = new ConcurrentHashMap<String, RealStreamUser>();// users pending authentication
	public static ConcurrentHashMap<String, RealStreamUser> AUTHORISED_USER_INFO = new ConcurrentHashMap<String, RealStreamUser>();// successfully authenticated users
	public static ConcurrentHashMap<Long, FormatHandler> DATA_FORMAT_HANDLER_MAP = new ConcurrentHashMap<Long, FormatHandler>();// formatting chain of responsibility per unit
	// Was a plain HashSet — unsafe when shared across the worker threads that use
	// the sibling caches; ConcurrentHashSet keeps the declared Set<Long> type.
	public static Set<Long> DATA_SEND_KAFKA_CAHCE = new ConcurrentHashSet<Long>();// units whose data is forwarded to Kafka
	public static ConcurrentHashMap<String, ChannelHandlerContext> ACCEPTING_CHANNEL_CACHE = new ConcurrentHashMap<String, ChannelHandlerContext>();// authenticated data-port channels
	public static ConcurrentHashMap<Integer, RingBuffer<ValueEvent>> UNITCODE_ACCEPTED_DISRUPTOR = new ConcurrentHashMap<Integer, RingBuffer<ValueEvent>>();// inbound ring buffer per interface unit
	public static LinkedBlockingQueue<RealStreamDataPack> BACK_FILESYS_DATA_QUEUE = new LinkedBlockingQueue<RealStreamDataPack>();// received messages awaiting filesystem backup
	public static ConcurrentHashMap<Long, RealStreamUnit> REALS_UNIT_CACHE = new ConcurrentHashMap<Long, RealStreamUnit>();// unit code -> data unit
	public static ConcurrentHashMap<Integer, CopyOnWriteArrayList<RealStreamUnit>> DATAPORT_UNIT_CACHE = new ConcurrentHashMap<Integer, CopyOnWriteArrayList<RealStreamUnit>>();// passive-mode data port -> data units
	public static ConcurrentHashMap<Long, RealDataDispatcher> DISPATCH_CAHCE = new ConcurrentHashMap<Long, RealDataDispatcher>();// dispatchers per unit
	public static ConcurrentHashSet<Integer> PASSIV_PORT = new ConcurrentHashSet<Integer>();// passive-mode listening ports
	public static ConcurrentHashMap<Long, RollingFileWriter> UNITCODE_FILEBACK_BUS_DISRUPTOR = new ConcurrentHashMap<Long, RollingFileWriter>();// file-backup writer per interface unit
	public static String THREAD_DISPATCHER_POOL_NAME = "real_stream_dispatcher_pool";
	public static String THREAD_FILEMGR_POOL_NAME = "file-manager-agent";
	public static int ctlCmdPort = 7501;// control-command listen port
	public static int realDataPort = 7502;// real-time data listen port
	public static DatabaseInfo dbInfo = null;// metadata database connection info, filled in verify()
	public static int rollInterval;// rolling file: interval (args[0])
	public static int rollSize;// rolling file: max size (args[1])
	public static int rollCount;// rolling file: max count (args[2])
	public static int batchSize;// write batch size (args[3])

	/**
	 * Process entry point. Validates the four rolling-file arguments, then
	 * starts all subsystems; exits with -1 on any initialization failure.
	 *
	 * @param args rollInterval, rollSize, rollCount, batchSize (all integers)
	 */
	public static void main(String[] args)
	{
		try
		{
			if (Constant.RETURN_OK != tool.verify(args))
			{
				logger.error("实时流系统初始化失败");
				System.exit(-1);
			}
			tool.init();
		}
		catch (KafkaProduceException e)
		{
			logger.error("实时流系统启动异常",e);
			System.exit(-1);
		}
	}

	/**
	 * Loads all stream units from the metadata database, starts the event
	 * counter/monitor, and spins up the dispatch, passive-send, Kafka,
	 * filesystem-backup and data-accept worker threads.
	 *
	 * @return {@link Constant#RETURN_OK} on success
	 * @throws KafkaProduceException if the Kafka sender threads fail to start
	 */
	public int init() throws KafkaProduceException
	{
		RealStreamUnitDao streamUnitDao = new RealStreamUnitDaoPGImpl();
		List<RealStreamUnit> allUnits = streamUnitDao.getAllStreamUnit();

		// Register every unit code as a counter attribute for monitoring.
		List<String> attribute = new ArrayList<String>();
		for (RealStreamUnit unit : allUnits)
		{
			attribute.add(Long.toString(unit.getOnlyUnitCode()));
			logger.info("统计单元接口信息[{}]............", unit.getOnlyUnitCode());
		}

		RealEventCounter eventCounter = RealEventCounter.getRealEventCounter(System.getProperty("user.name"), attribute);
		eventCounter.start();
		eventCounter.startSchedulerdMonitor();

		// Data distribution pipeline: format handlers, dispatcher threads,
		// passive-send listener, Kafka senders, filesystem backup, acceptors.
		DataDistributeService distService = new DataDistributeServiceImpl();
		distService.initDataFormatHandler();
		distService.initDataDistributeTh(eventCounter);
		distService.startPassiveSendListenerTh();
		distService.initKafkaMessSendTh();
		distService.startBackToFilesystemThread();
		distService.initDataAcceptThread(eventCounter);

		return Constant.RETURN_OK;
	}

	/**
	 * Validates the command-line arguments, configures the metadata database
	 * connection, initializes logging and checks the platform.
	 *
	 * @param args rollInterval, rollSize, rollCount, batchSize (all integers)
	 * @return {@link Constant#RETURN_OK} on success, {@link Constant#RETURN_FAILD}
	 *         on a non-numeric argument or unsupported platform
	 */
	public int verify(String[] args)
	{
		if (args.length != 4)
		{
			System.err.print("启动错误，系统参数错误");
			System.exit(1);
		}
		try
		{
			rollInterval = Integer.parseInt(args[0]);
			rollSize = Integer.parseInt(args[1]);
			rollCount = Integer.parseInt(args[2]);
			batchSize = Integer.parseInt(args[3]);
		}
		catch (NumberFormatException e)
		{
			// Previously an uncaught NumberFormatException crashed startup with a
			// raw stack trace; fail through the method's normal error path instead.
			logger.error("落地文件配置信息参数非法", e);
			return Constant.RETURN_FAILD;
		}
		logger.info("落地文件配置信息_rollInterval[{}]rollSize[{}]rollCount[{}]batchSize[{}]", rollInterval, rollSize, rollCount, batchSize);
		// test code
		// SECURITY NOTE(review): hardcoded JDBC URL and credentials — move to an
		// external, access-controlled configuration source before production use.
		dbInfo = new DatabaseInfo();
		dbInfo.setDatabaseType(Constant.Postgresql);
		dbInfo.setJdbcUrl("jdbc:postgresql://10.152.240.10:5432/etldb");
		dbInfo.setUserName("etl");
		dbInfo.setUserPassword("ZXRs");
		if (!System.getProperty("os.arch").contains("64"))
		{
			System.out.println("32bit JVM detected.  It is recommended to run etl_dqm on a 64bit JVM for better performance.");
		}else{
			// initialize logback with a watchdog that rescans the config every 60s
			LogBackWatchdogFactory.initFileRunLogging("../conf/logback.xml", 60000);
		}
		if (Constant.RETURN_OK != OSInfo.verifyPlatform())
		{
			System.err.println("系统错误，运行失败");
			return Constant.RETURN_FAILD;
		}
		return Constant.RETURN_OK;
	}
}
