package edu.kit.aifb.bowsim.index;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.logging.Logger;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.util.Version;
import org.semanticweb.yars.nx.DateTimeLiteral;
import org.semanticweb.yars.nx.Literal;
import org.semanticweb.yars.nx.Node;
import org.semanticweb.yars.nx.NumericLiteral;
import org.semanticweb.yars.nx.Resource;
import org.semanticweb.yars.nx.namespace.RDFS;
import org.semanticweb.yars.nx.parser.Callback;

/**
 * Matches RDF descriptions against previously indexed ones via a Lucene
 * full-text index and emits {@code rdfs:seeAlso} links for document pairs
 * whose similarity exceeds a threshold.
 *
 * <p>Not thread-safe: each {@link #match} call replaces the shared
 * near-real-time reader {@code _tis}.
 */
public class TextIndexMatcher {
	transient Logger _log = Logger.getLogger(this.getClass().getName());

	/** Batch size for draining a TermDocs postings enumeration. */
	public static int MAX_HITS = 2048;

	// Writer over the shared index; the subject's document is added before
	// matching and deleted again afterwards (see match()).
	TextIndexWriter _tiw = null;
	// Near-real-time reader, re-opened on every match() call.
	TextIndexReader _tis = null;
	// Analyzer used to tokenize literal objects into query terms.
	StandardAnalyzer _an = null;

	File _dir = null;

	/**
	 * @param tiw  writer over the text index to match against
	 * @param lang language tag (currently unused — TODO: wire into analyzer selection)
	 * @param dir  index directory (kept for reference only)
	 */
	public TextIndexMatcher(TextIndexWriter tiw, String lang, File dir) {
		_tiw = tiw;

		_dir = dir;

		_an = new StandardAnalyzer(Version.LUCENE_30, new HashSet<String>(Arrays.asList(TextIndexWriter.STOP_WORDS)));
	}

	/**
	 * Consumes subject-grouped quads and runs {@link #match} once per subject
	 * with all of its predicate/object pairs.
	 *
	 * @param quads     iterator over quads; MUST be grouped (e.g. sorted) by subject
	 * @param cb        callback receiving generated rdfs:seeAlso statements
	 * @param threshold minimum similarity for a link to be emitted
	 */
	public void matchAll(Iterator<Node[]> quads, Callback cb, float threshold) throws CorruptIndexException, IOException {
		Node oldSub = null;
		Node subject = null;
		Set<Node[]> predobjs = new HashSet<Node[]>();

		while (quads.hasNext()) {
			Node[] nx = quads.next();

			subject = nx[0];

			// Subject changed: flush the accumulated p/o pairs of the previous one.
			if ((oldSub != null) && (!subject.equals(oldSub))) {
				if (!predobjs.isEmpty()) {
					match(oldSub, predobjs, cb, threshold);
				}

				predobjs = new HashSet<Node[]>();
			}

			predobjs.add(new Node[] { nx[1], nx[2] });

			oldSub = subject;
		}

		// Flush the final subject's pairs.
		if (!predobjs.isEmpty()) {
			match(oldSub, predobjs, cb, threshold);
		}
	}

	/**
	 * Temporarily indexes the given description, collects candidate documents
	 * sharing at least one term, emits rdfs:seeAlso links for candidates above
	 * the threshold, then deletes the temporary document again.
	 *
	 * @param subj      subject whose description is matched
	 * @param predobjs  predicate/object pairs of the subject
	 * @param cb        callback receiving generated statements
	 * @param threshold minimum similarity for a link to be emitted
	 */
	public void match(Node subj, Set<Node[]> predobjs, Callback cb, float threshold) throws CorruptIndexException, IOException {
		_tiw.add(subj, predobjs);
		_tiw.getIndexWriter().commit();

		_log.info("matching " + subj.toN3() + " " + predobjs.size() + " p/o's");

		// Re-open a near-real-time reader so the freshly added document is visible.
		if (_tis != null) {
			_tis.close();
		}
		_tis = new TextIndexReader(_tiw.getIndexWriter().getReader());

		List<Term> terms = getTerms(predobjs, false);

		Set<Integer> matchdocs = new HashSet<Integer>();

		for (Term term : terms) {
			_log.info("checking term " + term);
			TermDocs td = _tis.termDocs(term);
			try {
				int[] docs = new int[MAX_HITS];
				int[] freqs = new int[MAX_HITS];

				// Drain the whole postings list in MAX_HITS-sized batches.
				// A single read() call would silently drop any documents
				// beyond the first MAX_HITS hits for this term.
				int read;
				while ((read = td.read(docs, freqs)) > 0) {
					for (int i = 0; i < read; i++) {
						matchdocs.add(docs[i]);
					}
				}
			} finally {
				td.close();
			}
		}

		int doc1 = _tis.getDocId(subj.toN3());

		for (int doc2 : matchdocs) {
			double sim = _tis.similarity(doc1, doc2);

			if (doc1 != doc2 && sim > threshold) {
				_log.info(subj.toString() + " match " + _tis.getUri(doc2) + ": " + sim);

				// getUri() returns the N3 form "<uri>"; strip the angle brackets.
				String uri2 = _tis.getUri(doc2);
				uri2 = uri2.substring(1, uri2.length()-1);

				cb.processStatement(new Node[] { subj, RDFS.SEEALSO, new Resource(uri2) } );
			}
		}

		// Done matching: remove the temporary document so the index is left unchanged.
		Term term = new Term(TextIndexWriter.URI, subj.toN3());
		_tiw.getIndexWriter().deleteDocuments(term);

		_tiw.getIndexWriter().commit();
	}

	/**
	 * Tokenizes the object values of the given p/o pairs into Lucene terms
	 * (field = predicate in N3, text = analyzed token).
	 *
	 * <p>Date and numeric literals are skipped; resource-valued objects are
	 * included (untokenized, in N3 form) only when {@code objects} is true.
	 *
	 * @param predobjs predicate/object pairs
	 * @param objects  whether to emit terms for resource-valued objects
	 * @return list of query terms
	 */
	public List<Term> getTerms(Set<Node[]> predobjs, boolean objects) throws IOException {
		List<Term> terms = new ArrayList<Term>();

		for (Node[] po : predobjs) {
			Node p = po[0];
			Node o = po[1];

			if (o instanceof DateTimeLiteral) {
				_log.fine("date");
			} else if (o instanceof NumericLiteral) {
				_log.fine("number");
			} else if (o instanceof Literal) {
				TokenStream ts = _an.tokenStream(p.toN3(), new StringReader(o.toString()));
				try {
					// Fetch the attribute once; it is updated in place by incrementToken().
					TermAttribute ta = ts.addAttribute(TermAttribute.class);
					while (ts.incrementToken()) {
						String s = ta.term();
						_log.finest("term is " + s);
						terms.add(new Term(p.toN3(), s));
					}
				} finally {
					ts.close();
				}
				_log.info(p.toN3() + ": " + o.toString());
			} else if (o instanceof Resource && objects) {
				terms.add(new Term(p.toN3(), o.toN3()));
			}
		}

		return terms;
	}
}
