package com.duowan.realtime.thrift;

import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.duowan.realtime.model.OverviewComputingGroup;
import com.duowan.realtime.server.util.BeanFactory;
import com.duowan.realtime.uniq.algorithm.bloomfilter.BloomFilter;
import com.duowan.realtime.webservice.DataComputingWebService;

/**
 * In-memory registry of {@link PartitionedBloomFilter}s keyed by group name,
 * with two background tasks scheduled at construction time: a periodic dump
 * of every filter to disk under {@code baseDir}, and a periodic status report
 * of every partition to the {@link DataComputingWebService}.
 */
public class BloomFilterDB {
	
	/** Single thread shared by the periodic dump task and the status-report task. */
	private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
	
	// Partitioned bloom filters keyed by group name -- analogous to tables in a database.
	private final ConcurrentMap<String, PartitionedBloomFilter> partitionedBloomFiltersInDb = new ConcurrentHashMap<String, PartitionedBloomFilter>();
	
	private static DataComputingWebService dataComputingWebService = BeanFactory.getComputingWebService();
	
	/** Seconds between successive dumps of all filters to disk. */
	private final int dumpIntervalSeconds = 1800;
	
	/** Seconds between successive status reports to the web service. */
	private final int reportIntervalSeconds = 60;
	
	/** Root directory under which each group's filter data is persisted. */
	private final String baseDir;
	
	private static final Logger logger = LoggerFactory.getLogger("bflog");
	
	/**
	 * Creates the DB rooted at {@code baseDir} and immediately schedules the
	 * periodic dump and status-report tasks.
	 *
	 * NOTE(review): the constructor calls the overridable public start* methods;
	 * unsafe if this class is ever subclassed -- confirm no subclasses exist.
	 *
	 * @param baseDir directory used as the persistence root for all groups
	 */
	public BloomFilterDB(String baseDir) {
		this.baseDir = baseDir;
		startDumpBloomFilterDb();
		startReportBFStatus();
	}

	/**
	 * Returns the {@link PartitionedBloomFilter} for the given group, creating
	 * and registering it on first access.
	 *
	 * Uses {@code putIfAbsent} so that concurrent callers for the same group all
	 * observe one shared instance. (The previous check-then-put sequence could
	 * create two filters for one group under contention, silently dropping the
	 * elements added to the losing instance.)
	 *
	 * @param bloomFilterGroup group (table-like) name; also used as the on-disk subdirectory
	 * @return the shared filter registered for the group, never {@code null}
	 */
	public PartitionedBloomFilter getPartitionedBloomFilter(String bloomFilterGroup) {
		PartitionedBloomFilter result = partitionedBloomFiltersInDb.get(bloomFilterGroup);
		if (result == null) {
			PartitionedBloomFilter created = new PartitionedBloomFilter(baseDir + "/" + bloomFilterGroup);
			PartitionedBloomFilter existing = partitionedBloomFiltersInDb.putIfAbsent(bloomFilterGroup, created);
			// Another thread may have won the race; keep whichever instance is registered.
			result = (existing != null) ? existing : created;
		}
		return result;
	}
	
	/**
	 * Convenience lookup: resolves the group's partitioned filter (creating it
	 * if absent) and returns the {@link BloomFilter} for the given partition.
	 *
	 * @param bloomFilterGroup group name
	 * @param partition        partition key within the group
	 * @return the partition's bloom filter
	 */
	public BloomFilter getBloomFilterIfNotExistAndInit(String bloomFilterGroup, String partition) {
		PartitionedBloomFilter bf = getPartitionedBloomFilter(bloomFilterGroup);
		return bf.getBloomFilter(partition);
	}
	
	/**
	 * Schedules a task that, every {@code reportIntervalSeconds}, reports the
	 * size and element count of every partition of every group to the
	 * {@link DataComputingWebService}. A failure for one group is logged and
	 * does not block reporting of the remaining groups.
	 */
	public void startReportBFStatus() {
		Runnable task = new Runnable() {
			@Override
			public void run() {
				logger.info("start to report status bloomfilter status.  ,db={}", partitionedBloomFiltersInDb.keySet());
				for (Entry<String, PartitionedBloomFilter> pbfEntry : partitionedBloomFiltersInDb.entrySet()) {
					try {
						for (Entry<String, BloomFilter> bfEntry : pbfEntry.getValue().getPartitons().entrySet()) {
							OverviewComputingGroup overviewComputingGroup = initOverviewComputingGroup(pbfEntry.getKey(), bfEntry);
							dataComputingWebService.reportDCGroupStatus(overviewComputingGroup);
							logger.info("The OverviewComputingGroup values:{}", overviewComputingGroup);
						}
					} catch (Exception e) {
						// Keep iterating: one broken group must not stop reports for the others.
						logger.error(" report status bloomfilter status error! PartitionedBloomFilter,key=" + pbfEntry.getKey() + ",value=" + pbfEntry.getValue(), e);
					}
				}
			}

			/** Builds the status DTO for one (group, partition) pair. */
			private OverviewComputingGroup initOverviewComputingGroup(String groupKey, Entry<String, BloomFilter> entry) {
				OverviewComputingGroup overviewComputingGroup = new OverviewComputingGroup();
				overviewComputingGroup.setBitsetSize((long) entry.getValue().size());
				overviewComputingGroup.setPartition(entry.getKey());
				overviewComputingGroup.setElementNum((long) entry.getValue().getNumberOfAddedElements());
				overviewComputingGroup.setComputingGroup(groupKey);
				return overviewComputingGroup;
			}
		};
		
		executor.scheduleAtFixedRate(task, reportIntervalSeconds, reportIntervalSeconds, TimeUnit.SECONDS);
	}
	
	/**
	 * Schedules a task that, every {@code dumpIntervalSeconds}, dumps each
	 * group's filter to disk and then evicts unchanged partitions from memory.
	 * A failure for one group is logged and does not block the remaining groups.
	 */
	public void startDumpBloomFilterDb() {
		Runnable task = new Runnable() {
			@Override
			public void run() {
				logger.info("start to dump BloomFilterDb. baseDir= {},db={}", baseDir, partitionedBloomFiltersInDb.keySet());
				for (PartitionedBloomFilter pbf : partitionedBloomFiltersInDb.values()) {
					try {
						pbf.dump();
						pbf.cleanNoChangePartitionsFromMemory();
					} catch (Exception e) {
						// Keep iterating: one failed dump must not stop dumps for the others.
						logger.error(" dump PartitionedBloomFilter error! PartitionedBloomFilter = " + pbf, e);
					}
				}
			}
		};
		
		executor.scheduleAtFixedRate(task, dumpIntervalSeconds, dumpIntervalSeconds, TimeUnit.SECONDS);
	}
	
	/**
	 * Stops the background dump and report tasks, releasing the scheduler
	 * thread. Previously there was no way to shut the executor down, so the
	 * non-daemon scheduler thread kept the JVM alive. Safe to call more than once.
	 */
	public void shutdown() {
		executor.shutdown();
	}
	
}
