package com.duowan.realtime.thrift;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.duowan.realtime.uniq.algorithm.hyperloglog.HyperLogLog;

/**
 * In-memory registry of {@link PartitionedHyperLogLog} instances keyed by group name,
 * lazily loaded from {@code baseDir/<group>} and periodically dumped back to disk by a
 * background scheduler. Unchanged partitions are evicted from memory after each dump cycle.
 *
 * Thread-safety: lookups use double-checked locking over a {@link ConcurrentHashMap},
 * so exactly one instance is ever loaded per group.
 */
public class HyperLogLogDB {
	private static final Logger logger = LoggerFactory.getLogger("hlllog");
	private final Map<String, PartitionedHyperLogLog> partitionedHyperloglogsInDb = new ConcurrentHashMap<String, PartitionedHyperLogLog>();
	private final String baseDir;
	// Interval between background dump/cleanup cycles.
	private final int dumpIntervalSeconds = 1800;
	private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();

	/**
	 * Creates the DB and immediately starts the periodic dump task.
	 *
	 * @param baseDir root directory under which each group's data is persisted
	 */
	public HyperLogLogDB(String baseDir) {
		this.baseDir = baseDir;
		startDumpHyperloglogDb();
	}

	/** Returns the live backing map (exposed for callers; mutations affect this DB). */
	public Map<String, PartitionedHyperLogLog> getDb() {
		return partitionedHyperloglogsInDb;
	}

	/**
	 * Returns the partition for {@code hllGroup}, loading it from disk on first access.
	 *
	 * BUGFIX: the previous implementation read the map before acquiring the lock and
	 * never re-read it inside the synchronized block, so two racing threads could each
	 * load a partition and the second put() would silently replace the first instance —
	 * counts recorded on the replaced instance were lost. The map is now re-checked
	 * under the lock, guaranteeing a single instance per group.
	 *
	 * @param hllGroup group name; also the file name under {@code baseDir}
	 * @return the (possibly freshly loaded) partition, never {@code null}
	 * @throws RuntimeException if loading the partition from disk fails
	 */
	public PartitionedHyperLogLog getPartitionedHyperLogLog(String hllGroup) {
		PartitionedHyperLogLog result = partitionedHyperloglogsInDb.get(hllGroup);
		if (result == null) {
			synchronized (this) {
				// Re-read under the lock: another thread may have loaded it meanwhile.
				result = partitionedHyperloglogsInDb.get(hllGroup);
				if (result == null) {
					result = loadPartition(hllGroup);
					partitionedHyperloglogsInDb.put(hllGroup, result);
				}
			}
		}
		return result;
	}

	/**
	 * Loads a partition from {@code baseDir/hllGroup}.
	 *
	 * @throws RuntimeException wrapping any loading failure, with the cause preserved
	 */
	private synchronized PartitionedHyperLogLog loadPartition(String hllGroup) {
		try {
			return new PartitionedHyperLogLog(baseDir + "/" + hllGroup);
		} catch (Exception e) {
			throw new RuntimeException("load partition error", e);
		}
	}

	/**
	 * Convenience lookup: resolves the group's partition (loading it if absent) and
	 * returns the hyperloglog for {@code partition} within it.
	 */
	public HyperLogLog getHyperLogLogIfNotExistAndInit(String hllGroup, String partition) {
		PartitionedHyperLogLog bf = getPartitionedHyperLogLog(hllGroup);
		return bf.getHyperloglog(partition);
	}

	/**
	 * Dumps every in-memory partition to disk. Failures are logged per-group and do not
	 * abort the remaining dumps (deliberate best-effort behavior).
	 *
	 * BUGFIX: iterates entrySet() instead of keySet()+get(); the old two-step read could
	 * observe a key that a concurrent remove had already deleted, yielding a null value
	 * and an NPE inside the loop.
	 */
	public void dump() {
		for (Map.Entry<String, PartitionedHyperLogLog> entry : partitionedHyperloglogsInDb.entrySet()) {
			try {
				entry.getValue().dump();
			} catch (Exception e) {
				// Best-effort: keep dumping the other groups.
				logger.error("error on dump,groupName:{}", entry.getKey(), e);
			}
		}
	}

	/**
	 * Evicts partitions that report no changes since their last dump, freeing memory.
	 * They will be reloaded from disk on next access.
	 *
	 * BUGFIX: iterates entrySet() to avoid the keySet()/get() null race (see dump()).
	 * NOTE(review): a partition written to between isChanged() and remove() could lose
	 * those writes from memory before the next dump — TODO confirm PartitionedHyperLogLog
	 * tolerates this, as the original code had the same window.
	 */
	public void cleanNoChangePartitionedHyperLogLogFromMemory() {
		for (Map.Entry<String, PartitionedHyperLogLog> entry : partitionedHyperloglogsInDb.entrySet()) {
			if (!entry.getValue().isChanged()) {
				logger.info("cleanNoChangePartitionedHyperLogLogFromMemory,groupName:{}", entry.getKey());
				partitionedHyperloglogsInDb.remove(entry.getKey());
			}
		}
	}

	/**
	 * Stops the background dump scheduler. Without this the non-daemon scheduler thread
	 * keeps the JVM alive indefinitely; call during application shutdown. Added as a
	 * backward-compatible extension — existing callers are unaffected.
	 */
	public void shutdown() {
		executor.shutdown();
	}

	/**
	 * Schedules the periodic dump + eviction task, first run after one full interval.
	 * The catch-all guard keeps an unexpected failure from cancelling future runs
	 * (scheduleAtFixedRate stops rescheduling a task that throws).
	 */
	private void startDumpHyperloglogDb() {
		Runnable task = new Runnable() {
			@Override
			public void run() {
				logger.info("start to dump HyperloglogDb. baseDir= {},db={}", baseDir, partitionedHyperloglogsInDb.keySet());
				try {
					dump();
					cleanNoChangePartitionedHyperLogLogFromMemory();
				} catch (Exception e) {
					logger.error(" dump PartitionedHyperLogLog error! ", e);
				}
			}
		};
		executor.scheduleAtFixedRate(task, dumpIntervalSeconds, dumpIntervalSeconds, TimeUnit.SECONDS);
	}
}
