package myLucene;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;

import org.apache.lucene.analysis.KeywordAnalyzer;
import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import prepro.CorpusToInt;

/**
 * Reads a file of pre-parsed idioms and turns each line into a Lucene
 * {@link SpanNearQuery} over the field named by {@code Props.queryField}.
 *
 * Expected line format: {@code <surface form> / <tok> <tok> <tok> ...}
 * i.e. a "/"-separated pair whose second segment is a whitespace-separated
 * list of term tokens (integer-encoded corpus tokens — see prepro.CorpusToInt).
 */
public class ReadQueries {

	// Default location of the parsed-idioms file (one idiom per line).
	private static final String DEFAULT_IDIOMS_PATH = "/local/joofeit/idiomsParsed";

	/**
	 * Reads the idioms file from the default path and builds one
	 * {@link SpanNearQuery} per line.
	 *
	 * @return list of span-near queries, one per input line, in file order
	 * @throws IOException    if the idioms file cannot be read
	 * @throws ParseException declared for backward compatibility with existing
	 *                        callers; not thrown by the current implementation
	 */
	public static LinkedList<SpanNearQuery> readIdioms() throws IOException,
			ParseException {
		return readIdioms(DEFAULT_IDIOMS_PATH);
	}

	/**
	 * Reads the idioms file at {@code idiomsPath} and builds one
	 * {@link SpanNearQuery} per line. Each whitespace-separated token of the
	 * line's second "/"-segment becomes a {@link SpanTermQuery} on
	 * {@code Props.queryField}; the tokens are combined with the slop
	 * {@code Props.slope} and order flag {@code Props.inOrder}.
	 *
	 * @param idiomsPath path of the parsed-idioms file
	 * @return list of span-near queries, one per input line, in file order
	 * @throws IOException    if the idioms file cannot be read
	 * @throws ParseException declared for backward compatibility; not thrown here
	 */
	public static LinkedList<SpanNearQuery> readIdioms(String idiomsPath)
			throws IOException, ParseException {
		LinkedList<SpanNearQuery> qList = new LinkedList<SpanNearQuery>();
		BufferedReader br = new BufferedReader(new FileReader(new File(
				idiomsPath)));
		try {
			String line;
			while ((line = br.readLine()) != null) {
				String[] tokens = getIdiomIntegers(line.trim());
				SpanQuery[] clauses = new SpanQuery[tokens.length];
				for (int i = 0; i < tokens.length; i++) {
					clauses[i] = new SpanTermQuery(new Term(Props.queryField,
							tokens[i]));
				}
				qList.add(new SpanNearQuery(clauses, Props.slope, Props.inOrder));
			}
		} finally {
			// Close the reader even if a malformed line aborts the loop.
			br.close();
		}
		return qList;
	}

	/**
	 * Extracts the token list from an idiom line of the form
	 * {@code "<surface form> / <tok> <tok> ..."}: splits on "/", takes the
	 * second segment, and returns its whitespace-separated tokens, each
	 * trimmed.
	 *
	 * @param line a single idiom line (must contain a "/" separator)
	 * @return the trimmed tokens of the segment after the first "/"
	 * @throws IllegalArgumentException if the line has no "/" separator
	 *                                  (previously surfaced as an
	 *                                  ArrayIndexOutOfBoundsException)
	 */
	public static String[] getIdiomIntegers(String line) {
		String[] parts = line.split("/");
		if (parts.length < 2) {
			throw new IllegalArgumentException(
					"Idiom line has no '/' separator: " + line);
		}
		String[] raw = parts[1].trim().split(" ");
		String[] idiomInts = new String[raw.length];
		for (int i = 0; i < raw.length; i++) {
			idiomInts[i] = raw[i].trim();
		}
		return idiomInts;
	}

}
