package de.connecttext.main.AcquireData;

import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import de.connecttext.application.Constants;
import de.connecttext.dao.solr.SolrDao;
import de.connecttext.dao.solr.WikipediaSolrDao;
import de.connecttext.dao.sql.SqlDao;
import de.connecttext.deprecated.GutenbergSolrDao;
import de.connecttext.deprecated.OriginalTermRecovery;
import de.connecttext.exception.SqlAccessException;
import de.connecttext.model.TagVector;
import de.connecttext.model.TextDocument;
import de.connecttext.services.Stopwatch;
import de.connecttext.threadpool.ReadWriteRefineTask;
import de.connecttext.threadpool.RetryRejectedExecutionHandler;

public class AcquireData {

	/**
	 * Entry point: splits the Wikipedia Solr index into fixed-size chunks and
	 * submits one {@link ReadWriteRefineTask} per chunk to a thread pool.
	 *
	 * @param args ignored
	 * @throws IOException            if Solr access fails
	 * @throws SqlAccessException     if the SQL layer cannot be reached
	 * @throws SQLException           on database errors
	 * @throws ClassNotFoundException if the JDBC driver class is missing
	 */
	public static void main(String[] args) throws IOException, ClassNotFoundException, SQLException, SqlAccessException {

		// LinkedBlockingQueue is unbounded, so tasks are normally never rejected;
		// the RetryRejectedExecutionHandler is only a safety net.
		BlockingQueue<Runnable> blockingQueue = new LinkedBlockingQueue<Runnable>();
		ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(2, 4,
				10L, TimeUnit.SECONDS, blockingQueue,
				new RetryRejectedExecutionHandler());
		// Let idle core threads time out so their non-daemon threads do not keep
		// the JVM alive after all work is done.
		threadPoolExecutor.allowCoreThreadTimeOut(true);

		int taskCounter = 0;

		int maxDocs = WikipediaSolrDao.getInstance().length();

		// There will usually be a "remainder" of documents when dividing by the
		// step size; round down here and submit the remainder separately below.
		int maxDocsRounded = maxDocs
				- (maxDocs % Constants.SQL_DOCUMENTS_BUFFER_SIZE);

		// Submit one full-size chunk per iteration.
		for (int i = 0; i < maxDocsRounded; i += Constants.SQL_DOCUMENTS_BUFFER_SIZE) {

			threadPoolExecutor.execute(new ReadWriteRefineTask(i,
					Constants.SQL_DOCUMENTS_BUFFER_SIZE, ++taskCounter));

		}

		// Submit the remainder chunk only if one actually exists. (Previously a
		// zero-length task was submitted whenever maxDocs divided evenly.)
		if (maxDocs > maxDocsRounded) {
			threadPoolExecutor.execute(new ReadWriteRefineTask(maxDocsRounded,
					maxDocs - maxDocsRounded, ++taskCounter));
		}

		System.out.println("All " + maxDocs + " given to the ThreadpoolExecutor");

		// No further tasks will be submitted: let the queued work drain and the
		// pool threads terminate instead of waiting for the idle timeout.
		threadPoolExecutor.shutdown();
	}

	/**
	 * Alternative single-threaded pipeline: streams documents out of Solr,
	 * buffers them in memory, and writes each full buffer to the SQL database.
	 *
	 * @param args ignored
	 * @throws Exception on any Solr or SQL failure
	 */
	public static void main2(String[] args) throws Exception {

		SolrDao solrDao = new WikipediaSolrDao();
		SqlDao sqlDao = SqlDao.getInstance();
		(new OriginalTermRecovery()).start();

		List<TextDocument> bufferedDocuments = new ArrayList<TextDocument>();

		int docCount = 0;

		Stopwatch stopwatch = new Stopwatch();
		stopwatch.start();

		for (TextDocument document : solrDao) {

			bufferedDocuments.add(document);
			docCount++;
			if (bufferedDocuments.size() >= Constants.SQL_DOCUMENTS_BUFFER_SIZE) {
				System.out.println("Documents Read From Solr: " + docCount
						+ " in " + stopwatch.getElapsedTime());
				// Reset the DAO's internal timing counters for the next batch.
				solrDao.elapsedTime1 = 0;
				solrDao.elapsedTime2 = 0;
				solrDao.elapsedTime3 = 0;

				stopwatch.reset();
				stopwatch.start();
				sqlDao.writeDocuments(bufferedDocuments);
				System.out.println("Documents Written To Database: " + docCount
						+ " in " + stopwatch.getElapsedTime());
				stopwatch.reset();
				stopwatch.start();
				bufferedDocuments.clear();
			}
		}

		// Write the final, partially filled buffer. (Previously up to
		// SQL_DOCUMENTS_BUFFER_SIZE - 1 trailing documents were silently
		// dropped here before the flush.)
		if (!bufferedDocuments.isEmpty()) {
			sqlDao.writeDocuments(bufferedDocuments);
			bufferedDocuments.clear();
		}

		sqlDao.flushBufferedDocuments();

	}
}
