package app.tool;

import java.io.File;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import org.rygh.core.util.UIo;
import org.rygh.core.util.timer.TaskTime;
import org.rygh.core.util.timer.TaskTimeManager;
import org.rygh.semantic.jena.DbIterator;
import org.rygh.semantic.jena.SDatabase;
import org.rygh.semantic.model.ex.Degreer;
import org.rygh.semantic.model.rdf.Entity;
import org.rygh.semantic.model.rdf.Fact;
import org.rygh.semantic.model.rdf.Predicate;
import org.rygh.semantic.sparql.RecursiveSparQL;
import org.rygh.semantic.sparql.ResultTuple;

import wsi.input.InputFile;
import wsi.input.InputFileTuple;
import wsi.script.AppConfig;
import wsi.script.QueryTuple;
import app.App;

public class prepareInput extends App {

	// Verbose progress logging; flip to true when debugging locally.
	private static boolean __DEBUG = false;

	/**
	 * Builds the output file name for a tuple: "&lt;type&gt;_&lt;namePredicate&gt;.txt",
	 * with a "_degree" suffix appended when entity degrees are recorded.
	 */
	private static String getFileName(QueryTuple tuple, boolean checkDegree) {
		return String.format("%s_%s%s.txt", tuple.getType().getBodyString(),
				tuple.getNamePredicate().getBodyString(),
				checkDegree ? "_degree" : "");
	}

	/**
	 * Entry point.
	 *
	 * @param args args[0] = AppConfig path; args[1] = "true"/"false" flag
	 *             selecting whether entity degrees are computed and sorted.
	 */
	public static void main(String[] args) {
		// Fail fast with a clear message: the previous `assert` was a no-op
		// unless the JVM ran with -ea, so a missing argument surfaced as an
		// ArrayIndexOutOfBoundsException instead.
		if (args.length < 2)
			throw new IllegalArgumentException(
					"usage: prepareInput <config> <checkDegree:true|false>");

		init();
		AppConfig.init(args[0]);

		boolean checkDegree = Boolean.parseBoolean(args[1]);
		run(checkDegree);
	}

	/**
	 * Runs one recursive SPARQL query for the given tuple at the given
	 * recursion level and appends every entity whose name value ("?y") is
	 * unambiguous (exactly one "?x" per "?y") to {@code ifile}.
	 *
	 * @param tuple       query definition (type, name predicate, optional link)
	 * @param level       recursion depth passed to the SPARQL builder
	 * @param dir         output folder (currently unused here; kept for
	 *                    signature stability with run())
	 * @param entity_set  entities already emitted across levels; used to
	 *                    de-duplicate results between calls
	 * @param ifile       accumulator for the (entity, value, degree) tuples
	 * @param checkDegree when true, compute each entity's degree; else -1
	 * @param isMbz       MusicBrainz flag (currently unused; it belonged to a
	 *                    duplicate-entity check that was disabled — see the
	 *                    block comment in the loop)
	 */
	private static void process(QueryTuple tuple, int level, File dir,
			Set<Entity> entity_set, InputFile ifile, boolean checkDegree,
			boolean isMbz) {
		Entity type = tuple.getType();
		Predicate pred = tuple.getNamePredicate();

		if (__DEBUG)
			System.out.printf("\nprocess %s.%s.%s.%d.%b\n", tuple.getName(),
					type.getValue(), pred.getValue(), level, checkDegree);

		// Build: ?x <pred> ?y, with ?x restricted to the tuple's type either
		// directly (?x w:type <type>) or through an intermediate link
		// (?z w:type <type>, ?z <link> ?x).
		RecursiveSparQL query = new RecursiveSparQL();
		Entity ex = new Entity("?x");
		Entity ey = new Entity("?y");

		if (tuple.getLinkPredicate() == null) {
			query.addFact(new Fact(ex, new Predicate("w:type"), type));
		} else {
			Entity ez = new Entity("?z");
			query.addFact(new Fact(ez, new Predicate("w:type"), type));
			query.addFact(new Fact(ez, tuple.getLinkPredicate(), ex));
		}

		query.addFact(new Fact(ex, pred, ey));
		query.setLevel(level);
		// Only keep name values ?y that identify exactly one entity ?x.
		query.groupBy("?y");
		query.having("count(distinct ?x) = 1");

		if (__DEBUG)
			System.out.printf("query:\n%s\n", query.toSelectSparql());

		Degreer deg = new Degreer();

		DbIterator iter = SDatabase.shared().iterator();
		iter.begin(query);

		final int UNIT = 10_000;
		final int M = 1_000_000;
		int count = 0;

		ResultTuple m;
		while ((m = iter.next()) != null) {
			count++;

			if (__DEBUG && count % UNIT == 0)
				System.out.printf("\t %.2fm entities processed.\r", 1.
						* count / M);

			if (m.isEmpty())
				continue;

			String value = m.getY().getValue();

			Entity e = m.getSampledX();
			assert e != null : m;

			// Set.add returns false when the entity was seen before, so a
			// single call replaces the old contains()+add() double lookup.
			if (!entity_set.add(e))
				continue;

			/*
			 * y:A type Person, y:A name "a", for "a", only one entity with type
			 * and name. But there may also be y:A_(band) name "a" and
			 * y:A_(band) is not a person. so we don't check here.
			 */

			// Degree lookup is comparatively expensive; -1 marks "not
			// computed" when checkDegree is off.
			int d = checkDegree ? deg.getDegree(e) : -1;

			ifile.add(new InputFileTuple(e, value, d));

			if (__DEBUG && !ifile.isEmpty() && ifile.size() % UNIT == 0)
				System.out.printf("\t %.2fm entities added.\r\n", 1.
						* ifile.size() / M);
		}

		iter.end();

		if (__DEBUG)
			System.out.printf("\nlist size %d / %d.\n", count, ifile.size());
	}

	/**
	 * Main driver: for every configured query tuple, collect entities over up
	 * to five recursion levels (stopping early once LIMIT is exceeded),
	 * optionally sort by degree, write one input file per tuple, and finally
	 * dump a timing summary.
	 */
	private static void run(boolean checkDegree) {
		AppConfig ac = AppConfig.shared();

		boolean isMbz = ac.isMbz();

		File dir = ac.getResourceInputFolder();

		// mkdirs() (not mkdir()) so a missing parent directory does not make
		// every subsequent save silently fail.
		if (!dir.exists())
			dir.mkdirs();

		TaskTimeManager tm = new TaskTimeManager();

		Map<String, QueryTuple> map = ac.getQueryMap();

		// ifile and entity_set are reused (cleared) across tuples to avoid
		// re-allocating large collections on every iteration.
		InputFile ifile = new InputFile();
		Set<String> file_cache = new HashSet<>();
		Set<Entity> entity_set = new HashSet<>();

		// Stop deepening the recursion once this many entities are collected.
		final int LIMIT = 10_000;

		for (Entry<String, QueryTuple> e : map.entrySet()) {
			QueryTuple tuple = e.getValue();

			String fname = getFileName(tuple, checkDegree);

			// Distinct tuples may map to the same file name; only the first
			// occurrence is processed (Set.add is false on a repeat).
			if (!file_cache.add(fname)) {
				if (__DEBUG)
					System.out.println("obtained previously " + fname);

				continue;
			}

			TaskTime timer = tm.newTaskAndStart(fname);

			entity_set.clear();
			ifile.clear();

			for (int i = 0; i < 5; i++) {
				process(tuple, i, dir, entity_set, ifile, checkDegree, isMbz);

				if (LIMIT > 0 && ifile.size() > LIMIT) {
					if (__DEBUG)
						System.out.println("obtained enough.");

					break;
				}
			}

			if (checkDegree)
				ifile.sort();

			timer.end();

			save(dir, tuple, ifile, checkDegree);
		}

		String fname = String.format("timers%s.txt", checkDegree ? "_degree"
				: "");
		UIo.saveFile(new File(dir, fname), tm.toString());

		bye();
	}

	/**
	 * Writes the collected input tuples to {@code dir/<fileName>}; an
	 * existing file is overwritten.
	 */
	private static void save(File dir, QueryTuple tuple, InputFile ifile,
			boolean checkDegree) {
		String fname = getFileName(tuple, checkDegree);

		UIo.saveFile(new File(dir, fname), ifile.toString());
	}
}
