package fnic.prehand.esagent.passitive_measure2;

 
import java.util.Calendar;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.commons.pool2.ObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.apache.log4j.Logger;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;
import fnic.prehand.augment.Flow;
import fnic.prehand.common.AgentConfig;
import fnic.prehand.esagent.EsAgent;
import fnic.river.RedisBulkLoop;
import fnic.river.RedisRiver;

public class PassitiveRedisRiver extends RedisRiver implements RedisBulkLoop, Runnable{
	private static final AgentConfig config = AgentConfig.getInstance();
	private static final Logger logger = Logger.getLogger(PassitiveRedisRiver.class);
	
	// Main-loop switch: cleared through setRunfg(false) so run() can exit gracefully.
	private static volatile boolean runfg = true;
	// Expected number of comma-separated fields in one flow record.
	private static final int FLOWFIELDSIZE = 19;
	private static final int bulksize = config.getRedisoncedealnum();
	private static final int clientnum = config.getclientWorkNum();
	private static final String redis_key = config.getPredisKey();
	// Hours covered by one index partition; the index-name suffix is hour / indexNumPerDay.
	private static final int indexNumPerDay = config.getPaggr_interval();
//	private static final String indexName = config.getPesIndexName();
	
	// NOTE(review): never read — loopBulk() declares its own local array. Kept only
	// because it is package-visible; candidate for removal.
	Response<String>[] rsps =  new Response[config.getRedisoncedealnum()];	
	private MakeAugment augmentmaker; // dimension augmenter (扩维)

	private ThreadPoolExecutor thdpool;
	GenericObjectPoolConfig poolConfig;
	ObjectPool<PassitiveAgent> objpool;
	
	/**
	 * Sets up the ES-agent object pool, the worker thread pool and the flow
	 * augmenter, then makes sure the current time-partitioned index exists.
	 *
	 * @param redishost redis server host (forwarded to RedisRiver)
	 * @param redisport redis server port (forwarded to RedisRiver)
	 * @param key       redis list key the river pops records from
	 */
	public PassitiveRedisRiver(String redishost, int redisport, String key){
		super(redishost, redisport, key);
		poolConfig = new GenericObjectPoolConfig();
		poolConfig.setMaxTotal(clientnum);
		poolConfig.setTestOnBorrow(false);
		poolConfig.setTestOnReturn(false);
		objpool = new GenericObjectPool<PassitiveAgent>(new PoolAgentFactory(), poolConfig);
		thdpool = (ThreadPoolExecutor)Executors.newFixedThreadPool(Math.max(clientnum, Runtime.getRuntime().availableProcessors()));
		augmentmaker = new MakeAugment();
		createIndex(currentIndexName());
	}

	/**
	 * Name of the index partition covering "now": {@code yyyy-M-d_<hour/indexNumPerDay>}.
	 * Month/day are intentionally not zero-padded (matches historical index names).
	 */
	private static String currentIndexName(){
		Calendar ca = Calendar.getInstance();
		int year = ca.get(Calendar.YEAR);
		int month = ca.get(Calendar.MONTH) + 1;
		int day = ca.get(Calendar.DATE);
		int hour = ca.get(Calendar.HOUR_OF_DAY);
		return year + "-" + month + "-" + day + "_" + (hour / indexNumPerDay);
	}

	/**
	 * Creates index {@code idx} with write-optimized settings unless it already
	 * exists. Synchronized so concurrent bulk dealers do not race on creation.
	 *
	 * @return true if the index exists or was created, false if creation was not acknowledged
	 */
	private static synchronized boolean createIndex(String idx){        
		Settings settings = ImmutableSettings.settingsBuilder()
		        .put("number_of_replicas", 0)
		        .put("index.refresh_interval", -1) // no periodic refresh during heavy indexing
		        .put("index.warmer.enabled", false)
		        .put("index.translog.flush_threshold_size", "300mb")
		        .put("index.translog.flush_threshold_period", "50m")
		        .put("index.translog.interval", "20s")
		        .put("index.gateway.local.sync", "20s")
		        .put("index.merge.scheduler.max_thread_count", 1)
		        .put("index.merge.policy.max_merged_segment", "1g")
		        .put("index.merge.policy.segments_per_tier", 20)
		        .put("index.store.throttle.max_bytes_per_sec", "5mb")
		        .build();

		IndicesExistsResponse existRsp = EsAgent.getClient().admin().indices().prepareExists(idx).execute().actionGet();
		if(existRsp.isExists()){
			return true;
		}
		CreateIndexResponse crtRsp = EsAgent.getClient().admin().indices().create(new CreateIndexRequest(idx, settings)).actionGet();
		if(!crtRsp.isAcknowledged()){
			logger.error("create index["+ idx +"] error");
			return false;
		}
		return true;
	}
	
	/**
	 * Pops up to {@code redisoncesize} records from redis in a single pipeline
	 * round trip and hands any non-empty batch to a background dealer.
	 *
	 * @return number of records actually fetched, or -1 if the pipeline sync failed
	 */
	@SuppressWarnings("unchecked")
	public int loopBulk(){
		int count = 0;
		Response<String>[] rsps =  new Response[config.getRedisoncedealnum()];	
		for (int i = 0; i < redisoncesize; i++) {
			rsps[i] = (Response<String>) jpline.rpop(redisKey);
		}
		try {
			jpline.sync();
		} catch (Exception e) {
			logger.error("!!!!------------redis sync get rsp fail: "+ e.getMessage(), e);
			return -1;
		}
		
		// rpop returns null once the list is drained, so the non-null prefix of
		// the responses is exactly the batch we received.
		for (int i = 0; i < redisoncesize; i++) {
			if (rsps[i] == null || rsps[i].get() == null) {
				break;
			}
			count++;
		}
		// Fix: do not borrow an agent / submit a worker task for an empty batch.
		if (count > 0) {
			dealRecords(rsps);
		}
		cycle++;
		return count;
	}
	
	/**
	 * River main loop: keeps pulling batches until {@link #setRunfg setRunfg(false)}
	 * is called, sleeping 5s whenever redis yielded less than a full batch.
	 */
	public void run() {
		int existnum = 0;
		// Fix: honour the shutdown flag — this was while(true), so setRunfg(false)
		// previously had no effect.
		while(runfg){
			existnum = loopBulk();
			if(existnum < redisoncesize){
				try {
					Thread.sleep(5000);
					logger.info("redis item num="+existnum+", idle_obj_num="+objpool.getNumIdle()+
							", active_obj_num="+objpool.getNumActive()+", then sleep 5 sec...");
				} catch (InterruptedException e) {
					Thread.currentThread().interrupt(); // restore interrupt status
					logger.warn("river loop interrupted", e);
				}
			}
		}
		logger.info("PassitiveRedisRiver loop stopped (runfg=false)");
	}
	

	/** Clears/sets the main-loop flag; pass false to stop the river after the current batch. */
	public static void setRunfg(boolean runfg) {
		PassitiveRedisRiver.runfg = runfg;
	}


	/*
	 * (non-Javadoc)
	 * @see fnic.river.RedisBulkLoop#dealRecords()
	 * Process this batch on a worker thread; the dealer borrows an agent from the
	 * pool and returns it when done.
	 */
	public void dealRecords(Response<String>[] responses) {
		thdpool.execute(new bulkIndexDealer(responses));
	}

	
	/**
	 * Processes one batch of redis records: parses each CSV record into a Flow,
	 * augments it and feeds it to a pooled PassitiveAgent. The constructor also
	 * rotates the agent's target index when the time partition has changed.
	 */
	class bulkIndexDealer implements Runnable{
		Response<String>[] rsps;
		PassitiveAgent agent;
		Flow flow;
		String idxname;
		
		bulkIndexDealer(Response<String>[] responses){
			flow = new Flow();
			rsps = responses;
			try {
				agent = objpool.borrowObject();
			} catch (Exception e) {
				logger.error("borrow agent from pool fail", e);
			}
			// Fix: the original carried on with agent == null and died with an NPE
			// on agent.getIndexName(); fail fast with a clear message instead.
			if (agent == null) {
				throw new IllegalStateException("no PassitiveAgent available from pool");
			}
			
			idxname = currentIndexName();

			// Rotate the agent's index when the time partition changed, giving
			// per-partition (e.g. per-day) indices.
			String oldidxname = agent.getIndexName();
			if (!idxname.equals(oldidxname)) {
				// Force-merge the finished partition down to a single segment.
				OptimizeRequest optreq = new OptimizeRequest(oldidxname);
				optreq.maxNumSegments(1).force(true);
				OptimizeResponse optrsp = agent.getClient().admin().indices().optimize(optreq).actionGet();
				int failshardnum = optrsp.getFailedShards();
				int succeedshardnum = optrsp.getSuccessfulShards();
				if(failshardnum>0){
					logger.info("optimize index("+oldidxname+")segments fail in "+failshardnum+" shards["+succeedshardnum+"]");
				}
				
				// TODO(review): kick off aggregation of the finished partition here
				// (the original left this step as an empty placeholder).
				
				// Close the finished index to release cluster resources.
				CloseIndexRequest closereq = new CloseIndexRequest(oldidxname);
				CloseIndexResponse closersp = agent.getClient().admin().indices().close(closereq).actionGet();
				boolean ack = closersp.isAcknowledged();
				if(!ack){
					logger.info("close index("+oldidxname+") fail");
				}
				
				// Create (if needed) and switch the agent to the new partition.
				createIndex(idxname);
				agent.setIndexName(idxname);
			}
		}
		
		public void run() {
			try {
				for(Response<String> rsp : rsps){
					if(rsp == null){
						break;
					}
					String item = rsp.get();
					if(item == null){
						break;
					}
					
					String[] strfields = item.split(",");
					if (strfields.length != FLOWFIELDSIZE) {
						logger.error("flow field size[" + strfields.length+ "] error, shoule be 19.");
						continue;
					}
					
					flow.num_oldFlow = new long[FLOWFIELDSIZE];
					long[] num_flow = flow.num_oldFlow;

					try {
						for (int n = 0; n < FLOWFIELDSIZE; n++) {
							num_flow[n] = Long.parseLong(strfields[n].trim());
						}
					} catch (NumberFormatException e) {
						// Fix: one malformed record used to throw out of run(),
						// killing the whole batch; skip just that record.
						logger.error("non-numeric flow field in record: " + item, e);
						continue;
					}
					augmentmaker.augment(flow);
					
					agent.atomProcess(flow);
				}
				agent.processBulkIfNeeded(true); // flush any pending bulk request
			} finally {
				// Fix: return the agent even when processing threw — the original
				// leaked one pooled agent per failed batch.
				try {
					objpool.returnObject(agent);
				} catch (Exception e) {
					logger.error("return agent to pool fail", e);
				}
			}
		}
		
	}

}
