package com.flute.haflute.tools.cassandra;

import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.flute.haflute.jobbox.base.RunProcessInterface;
import com.flute.haflute.jobbox.base.storage.KeyValueStorageService;
import com.flute.haflute.jobbox.base.storage.StorageServiceFactory;
import com.flute.haflute.tools.ClusterUtils;

/**
 * Periodic background job that purges stale rows from Cassandra key-value
 * storage. Once started via {@link #callMain(String...)}, it schedules itself
 * on a {@link Timer}: every {@code checkInterval} ms it fetches all entries
 * whose timestamp key falls in [0, now - checkInterval] and deletes them.
 */
public class ArchiveProcess extends TimerTask implements RunProcessInterface, Runnable {
	private static final Logger logger = LoggerFactory.getLogger(ArchiveProcess.class);

	// Purge window and timer period in milliseconds; defaults to one day.
	// Overridden from "CrawlerTime2UniqueURL.staleTimeStampInSeconds" (seconds).
	private long checkInterval = 24L * 60 * 60 * 1000;
	// Written by the starter thread, read by arbitrary callers -> volatile for visibility.
	private volatile boolean initialized = false;
	// Kept so shutdown() can cancel it; the original code leaked an anonymous
	// Timer whose non-daemon thread could never be stopped.
	private Timer timer;
	private KeyValueStorageService cassandraStorageService;

	/**
	 * Initializes the storage service, reads the stale-window configuration
	 * (falling back to the defaults on any error), and schedules this task to
	 * run after a 60s delay and then every {@code checkInterval} ms.
	 *
	 * @param params unused
	 * @throws Exception declared by the interface; configuration errors are
	 *         caught and logged rather than propagated
	 */
	@Override
	public void callMain(String... params) throws Exception {
		try {
			cassandraStorageService = StorageServiceFactory.getStorageService(true);
			String interval = ClusterUtils.get3rdPartConfiguration().getProperty("CrawlerTime2UniqueURL.staleTimeStampInSeconds","86400");
			checkInterval = Long.parseLong(interval) * 1000;
		} catch (Exception e) {
			logger.error("initial archive parameters error, use defaults instead", e);
		}
		timer = new Timer("cassandra-archive-timer");
		timer.schedule(this, 60 * 1000L, checkInterval);
		initialized = true;
	}

	@Override
	public boolean isInitialized() {
		return initialized;
	}

	@Override
	public void restart() {
		// Intentionally a no-op, as in the original implementation.
	}

	@Override
	public void shutdown() {
		// Cancel the background timer so its thread does not leak and keep
		// the JVM alive after shutdown is requested.
		if (timer != null) {
			timer.cancel();
			timer = null;
		}
		initialized = false;
	}

	/**
	 * One archive pass: deletes every entry whose key (a millisecond
	 * timestamp, presumably — TODO confirm against KeyValueStorageService)
	 * lies between "0" and now - checkInterval. Errors are logged and the
	 * pass ends; the timer will retry on the next period.
	 */
	@Override
	public void run() {
		logger.info("Cassandra archive process start once");
		try {
			// Lazily (re)acquire the service in case callMain's attempt failed.
			if (cassandraStorageService == null) {
				cassandraStorageService = StorageServiceFactory.getStorageService(true);
			}
			Map<String, String> tsm = cassandraStorageService.batchGet("0", String.valueOf(System.currentTimeMillis() - checkInterval));
			for (String ts : tsm.keySet()) {
				cassandraStorageService.delete(ts);
			}
		} catch (Exception e) {
			logger.error("delete stale data error", e);
		}
		logger.info("Cassandra archive process finished");
	}
}
