/**
 *
 */
package org.cmcc.pcap.jpcap.tcp.http;

import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.cmcc.pcap.jpcap.tcp.TcpConnection;
import org.cmcc.pcap.jpcap.tcp.TcpProtocolHandler;
import org.cmcc.pcap.jpcap.tcp.http.xdr.HttpXdrGenerate;
import org.cmcc.pcap.jpcap.util.CommonStatistics;

import com.cmcc.xdr.db.PcapHttpDao;

/**
 * Handles a finished TCP connection as HTTP traffic: cleans and reassembles the
 * TCP segments, splits the stream into HTTP transactions, generates XDR records
 * for each transaction and persists them via {@link PcapHttpDao}.
 *
 * @author Amish
 * @since 2018-03-21
 */
public class HttpHandler extends TcpProtocolHandler {

	private static final Logger logger = LogManager.getLogger(HttpHandler.class);

	/** Accumulates generated HTTP entities until they are flushed to the database. */
	private final HttpXdrGenerate httpXdrGenerate;

	public HttpHandler() {
		httpXdrGenerate = new HttpXdrGenerate();
	}

	/**
	 * Processes one TCP connection end-to-end: segment cleanup and reassembly,
	 * HTTP transaction splitting, XDR generation, and database persistence.
	 *
	 * @param tcpConnection the connection to process; expected to contain at
	 *                      least one packet in {@code al} (empty connections are
	 *                      skipped with a warning instead of throwing)
	 * @param taskId        capture-task identifier forwarded to the DAO
	 */
	@Override
	public void handle(TcpConnection tcpConnection, String taskId) {
		// Guard: the original code indexed al.get(size()-1) and would throw
		// IndexOutOfBoundsException on an empty connection.
		if (tcpConnection == null || tcpConnection.al == null || tcpConnection.al.isEmpty()) {
			logger.warn("skip empty tcp connection, taskId={}", taskId);
			return;
		}

		// Processing time of the last packet (note: NOT the capture time).
		logger.debug(new Date(tcpConnection.lastPacketTime));
		// pcap timestamps are in seconds; convert to milliseconds.
		long pcapTimestamp = tcpConnection.al.get(tcpConnection.al.size() - 1).tcpPacket.sec * 1000;
		// Capture time of the last packet.
		logger.debug(new Date(pcapTimestamp));

		/*
		 * In-connection TCP segment handling: duplicate-ACK merge, retransmission
		 * and spurious-retransmission cleanup, out-of-order reassembly.
		 * TODO: keep only the first 2 segments by default, make it configurable?
		 */
		tcpConnection.tcpConnectionPacketsDeal();
		logger.debug("this connection <{}> has packets (initial): {}",
				tcpConnection.connectionKey, tcpConnection.packetsNumInitial);
		logger.debug("this connection <{}> has packets (after duplicate ack merge): {}",
				tcpConnection.connectionKey, tcpConnection.packetsNumAfterDuplicateAckMerge);
		logger.debug("this connection <{}> has packets (after retransmission merge): {}",
				tcpConnection.connectionKey, tcpConnection.packetsNumAfterRetransmissionMerge);
		logger.debug("this connection <{}> has packets (after reorder packets): {}",
				tcpConnection.connectionKey, tcpConnection.packetsNumAfterReorderTcpPacket);
		logger.debug("this connection <{}> has packets (after reassemble): {}",
				tcpConnection.connectionKey, tcpConnection.packetsNumAfterReassemble);

		/*
		 * HTTP transaction split: partition the connection into one unit per HTTP
		 * transaction. Handshake information goes into the first transaction,
		 * teardown information into the last one.
		 */
		ArrayList<HttpTransaction> transactions = HttpTransactionSplit.split(tcpConnection);
		logger.debug("this connection <{}> split http transaction!", tcpConnection.connectionKey);

		for (HttpTransaction transaction : transactions) {
			// TODO hash by start time onto different worker threads to generate files.
			try {
				httpXdrGenerate.generate(transaction);
			} catch (Exception e) {
				// Log with full stack trace (the original passed e.getCause() as the
				// message, which may be null) and keep processing remaining transactions.
				logger.error("failed to generate xdr for connection <" + tcpConnection.connectionKey + ">", e);
			}
		}

		PcapHttpDao.save(httpXdrGenerate.httpEntityList, taskId);
		CommonStatistics.getInstance().addHttpEntityNum(httpXdrGenerate.httpEntityList.size());
		httpXdrGenerate.httpEntityList.clear();
		logger.debug("this connection <{}> saved into database!", tcpConnection.connectionKey);
	}

}
