package ar.uba.dc.webming.tp1.main;



import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.List;


import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.snowball.SnowballAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import ar.uba.dc.webming.tp1.measures.ResultSetEvaluator;
import ar.uba.dc.webming.tp1.output.QueryResultFileWriter;
import ar.uba.dc.webming.tp1.relevance.RelevanceJudgementMap;
import ar.uba.dc.webming.tp1.relevance.RelevanceJudgment;
import ar.uba.dc.webming.tp1.relevance.RelevanceParser;
import ar.uba.dc.webming.tp1.scoring.SimilarityIdfModified;
import ar.uba.dc.webming.tp1.scoring.SimilarityNormCubeRoot;
import ar.uba.dc.webming.tp1.scoring.SimilarityNormOne;
import ar.uba.dc.webming.tp1.scoring.SimilarityTfCubeRoot;
import ar.uba.dc.webming.tp1.scoring.SimilarityTfLineal;



public class ExecuteOhsuQueries {



  /**
   * Entry point: runs a batch of OHSU queries against a Lucene index.
   *
   * Parses command-line options, builds the requested {@link Similarity}
   * and {@link Analyzer}, reads {@code <top>...</top>} query blocks from
   * the query file, executes each query and appends per-query evaluation
   * results (via {@link ResultSetEvaluator}) to the output file.
   *
   * Required flags: -index, -querys, -rels, -out.
   * Optional flags: -sim, -an, -ql, -acr (see usage text below).
   *
   * @param args command-line arguments as described in the usage string
   * @throws Exception on any I/O, index or query-parsing failure
   */
  public static void main(String[] args) throws Exception {
    String usage =
        "Usage:\tjava "+ExecuteOhsuQueries.class.getName()+ " " +
        "-index <index-dir> -querys <query-file> -rels <relevance-file> -out <output-file> " +
        "[-sim similarityNum] [-an analyzerNum] [-ql qlNum]\n [-acr acrIndex]";
    usage += "\tAnalyzer number description:\n";
    usage += "\tanalyzerNum = 0\t\tLucene StandardAnalizer (default)\n";
    usage += "\tanalyzerNum = 1\t\tSnowballAnalizer with porter stemmer without stopwords\n";
    usage += "\tanalyzerNum = 2\t\tSnowballAnalizer with porter stemmer with english stopwords\n";
    usage += "\tSimilarity number description:\n";
    usage += "\tsimilarityNum = 0\tuses: "+DefaultSimilarity.class.getSimpleName()+" class (default)\n";
    usage += "\tsimilarityNum = 1\tuses: "+SimilarityIdfModified.class.getSimpleName()+" class\n";
    usage += "\tsimilarityNum = 2\tuses: "+SimilarityTfLineal.class.getSimpleName()+" class\n";
    usage += "\tsimilarityNum = 3\tuses: "+SimilarityTfCubeRoot.class.getSimpleName()+" class\n";
    usage += "\tsimilarityNum = 4\tuses: "+SimilarityNormOne.class.getSimpleName()+" class\n";
    usage += "\tsimilarityNum = 5\tuses: "+SimilarityNormCubeRoot.class.getSimpleName()+" class\n";
    usage += "\tql stands for queryLogic, and there are three options using the qlNum:\n";
    usage += "\tqlNum = 0\t\t.T=aTitle AND (.M=aDesc OR .W=aDesc) (default)\n";
    usage += "\tqlNum = 1\t\t.T=\"aTitle\" OR .M=\"aDesc\"\n";
    usage += "\tqlNum = 2\t\t.T=\"aTitle\" OR .M=\"aDesc\" OR .W=aDesc\n";
    usage += "\tacrIndex = path of the acronym index";
    if (args.length >= 1 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        return;
    }

    String indexFileName = null;
    String queriesFileName = null;
    String relevanceFileName = null;
    String outfileName = null;
    Integer similarityNum = 0;
    Integer analyzerNum = 0;
    Integer qlNum = 0;
    boolean raw = false;
    boolean paging = true;
    int hitsPerPage = 10;
    String acrIndexFile = null;

    // Each option consumes the following token as its value.
    int i = 0;
    while (i < args.length) {
        String opc = args[i];
        if ("-index".equals(opc)) {
            indexFileName = args[i+1];
            i++;
        } else if ("-querys".equals(opc)) {
            queriesFileName = args[i+1];
            i++;
        } else if ("-rels".equals(opc)) {
            relevanceFileName = args[i+1];
            i++;
        } else if ("-out".equals(opc)) {
            outfileName = args[i+1];
            i++;
        } else if ("-sim".equals(opc)) {
            similarityNum = Integer.valueOf(args[i+1]);
            i++;
        } else if ("-an".equals(opc)) {
            analyzerNum = Integer.valueOf(args[i+1]);
            i++;
        } else if ("-ql".equals(opc)) {
            qlNum = Integer.valueOf(args[i+1]);
            i++;
        } else if ("-acr".equals(opc)) {
            acrIndexFile = args[i+1];
            i++;
        }
        i++;
    }

    // All four file arguments are mandatory.
    if (indexFileName == null || queriesFileName == null || relevanceFileName == null || outfileName == null) {
        System.out.println(usage);
        return;
    }

    // Select the scoring model; 0 / unknown values fall back to the default.
    Similarity similarity = null;
    if (similarityNum == 1) {
        similarity = new SimilarityIdfModified();
    } else if (similarityNum == 2) {
        similarity = new SimilarityTfLineal();
    } else if (similarityNum == 3) {
        similarity = new SimilarityTfCubeRoot();
    } else if (similarityNum == 4) {
        similarity = new SimilarityNormOne();
    } else if (similarityNum == 5) {
        similarity = new SimilarityNormCubeRoot();
    } else {
        similarity = new DefaultSimilarity();
    }

    // Select the analyzer; 0 / unknown values fall back to StandardAnalyzer.
    Analyzer analyzer = null;
    if (analyzerNum == 1) {
        analyzer = new SnowballAnalyzer("Porter");
    } else if (analyzerNum == 2) {
        analyzer = new SnowballAnalyzer("Porter", StopAnalyzer.ENGLISH_STOP_WORDS);
    } else {
        analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
    }

    List<RelevanceJudgment> relJudgs = new RelevanceParser().parse(new InputStreamReader(new FileInputStream(new File(relevanceFileName))));
    RelevanceJudgementMap relJudgMap = new RelevanceJudgementMap(relJudgs);
    IndexReader reader = IndexReader.open(FSDirectory.open(new File(indexFileName)), true); // only searching, so read-only=true
    QueryResultFileWriter resultWriter = new QueryResultFileWriter(new File(outfileName));

    Searcher searcher = new IndexSearcher(reader);
    searcher.setSimilarity(similarity);

    BufferedReader in = new BufferedReader(new FileReader(queriesFileName));
    QueryParser parser = new QueryParser(".M", analyzer);

    // Query file format per topic:
    //   <top>
    //   <num> Number: NNN
    //   <title> ...     (title text starts at column 8)
    //   <desc> Description:
    //   ...description line...
    //   </top>
    while (true) {
          // Skip any noise until the next <top> marker; EOF ends the batch.
          String topTag = in.readLine();
          while (topTag != null && !topTag.equals("<top>")) {
              topTag = in.readLine();
          }
          if (topTag == null) break;

          String ohsuNum = in.readLine();
          assert(ohsuNum.contains("<num> Number: "));
          String titleLine = in.readLine();
          String title = titleLine.substring(8);
          String descTag = in.readLine();
          assert( descTag.equals("<desc> Description:"));
          String desc = in.readLine();
          String topCloseTag = in.readLine();
          assert(topCloseTag.equals("</top>"));

          // Build the query string according to the chosen query logic.
          String queryStr = "";
          if (qlNum == 1) {
              queryStr = ".T:\""+title.trim()+"\" OR \""+desc.trim()+"\""; // primeros resultados
          } else if (qlNum == 2) {
              queryStr = ".T:\""+title.trim()+"\" OR \""+desc.trim()+"\" OR " +createQueryForField(analyzer, ".W", desc.trim()); // segundos resultados
          } else {
              queryStr = "("+createQueryForField(analyzer, ".T", title.trim())+") AND (\""+desc.trim()+"\" OR " +createQueryForField(analyzer, ".W", desc.trim())+")";// terceros resultados
          }

          // Optionally expand the query with acronym definitions found in
          // the acronym index (title and description, on both fields).
          if (acrIndexFile != null) {
              queryStr = "("+queryStr+")";
              queryStr = appendAcronismDefIfExists(title, acrIndexFile, queryStr, analyzer, ".T");
              queryStr = appendAcronismDefIfExists(title, acrIndexFile, queryStr, analyzer, ".M");
              queryStr = appendAcronismDefIfExists(desc, acrIndexFile, queryStr, analyzer, ".M");
              queryStr = appendAcronismDefIfExists(desc, acrIndexFile, queryStr, analyzer, ".T");
          }

          Query query = parser.parse(queryStr);
          System.out.println("Searching for: " + ohsuNum.substring(14) +", "+ query.toString());
          // NOTE: a previous version also ran searcher.search(query, null, 100)
          // here and discarded the result; that dead call has been removed —
          // the evaluator below performs the search itself.

          ResultSetEvaluator evaluator = new ResultSetEvaluator(searcher, query, relJudgMap, ohsuNum.substring(14));
          resultWriter.appendResults(evaluator);

          if (paging) {
            // Non-interactive here (query file mode), so this prints one page.
            doPagingSearch(in, searcher, query, hitsPerPage, raw, false);
          } else {
            doStreamingSearch(searcher, query);
          }
    }
    // Release all resources (the query reader and searcher were previously leaked).
    in.close();
    resultWriter.close();
    searcher.close();
    reader.close();
    System.out.println("Done");
  }
  
	/**
	 * Looks up each word of {@code fieldValue} in the acronym index and, for
	 * every exact (case-insensitive) acronym match, appends an
	 * {@code OR fieldName:"meaning"} clause to {@code query}.
	 *
	 * Fix: the {@link IndexReader} and {@link Searcher} were previously never
	 * closed, leaking file handles on every call (this method runs four times
	 * per query); they are now released in finally blocks.
	 *
	 * @param fieldValue   text whose words are probed as acronym candidates
	 * @param acrIndexFile path of the acronym index directory
	 * @param query        query string to extend
	 * @param analyzer     analyzer used to parse the acronym lookup query
	 * @param fieldName    field the expansion clause targets (e.g. ".T", ".M")
	 * @return the (possibly extended) query string
	 * @throws CorruptIndexException if the acronym index is corrupt
	 * @throws IOException           on index I/O failure
	 * @throws ParseException        if an acronym lookup query fails to parse
	 */
	private static String appendAcronismDefIfExists(String fieldValue, String acrIndexFile, String query, 
	        Analyzer analyzer, String fieldName) 
	        throws CorruptIndexException, IOException, ParseException {
	    IndexReader reader = IndexReader.open(FSDirectory.open(new File(acrIndexFile)), true);
	    try {
	        Searcher searcher = new IndexSearcher(reader);
	        try {
	            QueryParser parser = new QueryParser("ACR", analyzer);
	            String[] words = fieldValue.split(" ");
	            for (int i = 0; i < words.length; i++) {
	                // Strip punctuation so e.g. "AIDS," still matches "AIDS".
	                String word = words[i].replaceAll("[^a-zA-Z0-9]", "");
	                String acrQueryStr = "ACR:\""+word+"\"";
	                Query acrQuery = parser.parse(acrQueryStr);
	                TopDocs topDocs = searcher.search(acrQuery, null, 100);
	                if (topDocs.totalHits > 0) {
	                    ScoreDoc[] hits = topDocs.scoreDocs;
	                    for (int j = 0; j < hits.length; j++) {
	                        Document doc = searcher.doc(hits[j].doc);
	                        // Only expand on an exact acronym match, not mere hits.
	                        if (doc.get("ACR").equalsIgnoreCase(word)) {
	                            String acrMeaning = doc.get("MEANING");
	                            query = query + " OR " + fieldName +":\""+acrMeaning+"\" ";
	                        }
	                    }
	                }
	            }
	        } finally {
	            searcher.close();
	        }
	    } finally {
	        reader.close();
	    }
	    return query;
	}

    /**
     * Parses {@code text} against the given field using {@code analyzer} and
     * returns the resulting Lucene query rendered back to its string form.
     *
     * @param analyzer analyzer applied while parsing
     * @param field    default field the parsed terms target
     * @param text     raw query text
     * @return the parsed query's string representation
     * @throws ParseException if {@code text} is not a valid query
     */
    private static String createQueryForField(Analyzer analyzer,
			String field, String text) throws ParseException {
        Query parsed = new QueryParser(field, analyzer).parse(text);
        return parsed.toString();
	}
  
  /**
   * This method uses a custom HitCollector implementation which simply prints out
   * the docId and score of every matching document. 
   * 
   *  This simulates the streaming search use case, where all hits are supposed to
   *  be processed, regardless of their relevance.
   */
  /**
   * This method uses a custom HitCollector implementation which simply prints out
   * the docId and score of every matching document. 
   * 
   *  This simulates the streaming search use case, where all hits are supposed to
   *  be processed, regardless of their relevance.
   *
   * @param searcher searcher to run the query against
   * @param query    query whose hits are streamed to stdout
   * @throws IOException on index I/O failure
   */
  public static void doStreamingSearch(final Searcher searcher, Query query) throws IOException {
    Collector streamingHitCollector = new Collector() {
      private Scorer scorer;
      private int docBase;
      
      // simply print docId and score of every matching document
      public void collect(int doc) throws IOException {
        // Fix: "doc=" + doc + docBase concatenated the two ints as strings
        // (e.g. doc 3 at base 100 printed "3100"); the global docId is the sum.
        System.out.println("doc=" + (docBase + doc) + " score=" + scorer.score());
      }

      public boolean acceptsDocsOutOfOrder() {
        return true;
      }

      public void setNextReader(IndexReader reader, int docBase)
          throws IOException {
        // Segment-relative doc ids are offset by this base.
        this.docBase = docBase;
      }

      public void setScorer(Scorer scorer) throws IOException {
        this.scorer = scorer;
      }
      
    };
    
    searcher.search(query, streamingHitCollector);
  }

  /**
   * This demonstrates a typical paging search scenario, where the search engine presents 
   * pages of size n to the user. The user can then go to the next page if interested in
   * the next hits.
   * 
   * When the query is executed for the first time, then only enough results are collected
   * to fill 5 result pages. If the user wants to page beyond this limit, then the query
   * is executed another time and all hits are collected.
   * 
   */
  /**
   * This demonstrates a typical paging search scenario, where the search engine presents 
   * pages of size n to the user. The user can then go to the next page if interested in
   * the next hits.
   * 
   * When the query is executed for the first time, then only enough results are collected
   * to fill 5 result pages. If the user wants to page beyond this limit, then the query
   * is executed another time and all hits are collected.
   *
   * Fixes: {@code in.readLine()} may return {@code null} at end of stream, which
   * previously threw a NullPointerException; non-numeric page input previously
   * crashed with an uncaught NumberFormatException. Both now terminate / reprompt
   * gracefully. Normal interactive behavior is unchanged.
   *
   * @param in          reader for interactive commands (may hit EOF)
   * @param searcher    searcher to run the query against
   * @param query       query whose hits are paged
   * @param hitsPerPage page size
   * @param raw         if true, print raw docId/score instead of fields
   * @param interactive if false, print the first page only and return
   * @throws IOException on index or console I/O failure
   */
  public static void doPagingSearch(BufferedReader in, Searcher searcher, Query query, 
                                     int hitsPerPage, boolean raw, boolean interactive) throws IOException {
 
    // Collect enough docs to show 5 pages
    TopScoreDocCollector collector = TopScoreDocCollector.create(
        5 * hitsPerPage, false);
    searcher.search(query, collector);
    ScoreDoc[] hits = collector.topDocs().scoreDocs;
    
    int numTotalHits = collector.getTotalHits();
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);
        
    while (true) {
      if (end > hits.length) {
        System.out.println("Only results 1 - " + hits.length +" of " + numTotalHits + " total matching documents collected.");
        System.out.println("Collect more (y/n) ?");
        String line = in.readLine();
        // Treat EOF (null) like "no": stop paging.
        if (line == null || line.length() == 0 || line.charAt(0) == 'n') {
          break;
        }

        // Re-run the query collecting every hit so any page can be shown.
        collector = TopScoreDocCollector.create(numTotalHits, false);
        searcher.search(query, collector);
        hits = collector.topDocs().scoreDocs;
      }
      
      end = Math.min(hits.length, start + hitsPerPage);
      
      for (int i = start; i < end; i++) {
        if (raw) {                              // output raw format
          System.out.println("doc="+hits[i].doc+" score="+hits[i].score);
          continue;
        }

        Document doc = searcher.doc(hits[i].doc);
        String ui = doc.get(".U");
        if (ui != null) {
          System.out.println((i+1) + ". UI: " + ui);
          String title = doc.get(".T");
          if (title != null) {
            System.out.println("   Title: " + title);
          }
        } else {
          System.out.println((i+1) + ". " + "No path for this document");
        }
                  
      }

      if (!interactive) {
        break;
      }

      if (numTotalHits >= end) {
        boolean quit = false;
        while (true) {
          System.out.print("Press ");
          if (start - hitsPerPage >= 0) {
            System.out.print("(p)revious page, ");  
          }
          if (start + hitsPerPage < numTotalHits) {
            System.out.print("(n)ext page, ");
          }
          System.out.println("(q)uit or enter number to jump to a page.");
          
          String line = in.readLine();
          // Treat EOF (null) like "quit".
          if (line == null || line.length() == 0 || line.charAt(0)=='q') {
            quit = true;
            break;
          }
          if (line.charAt(0) == 'p') {
            start = Math.max(0, start - hitsPerPage);
            break;
          } else if (line.charAt(0) == 'n') {
            if (start + hitsPerPage < numTotalHits) {
              start+=hitsPerPage;
            }
            break;
          } else {
            // Non-numeric input used to crash the run; now just reprompt.
            int page;
            try {
              page = Integer.parseInt(line.trim());
            } catch (NumberFormatException e) {
              System.out.println("No such page");
              continue;
            }
            if ((page - 1) * hitsPerPage < numTotalHits) {
              start = (page - 1) * hitsPerPage;
              break;
            } else {
              System.out.println("No such page");
            }
          }
        }
        if (quit) break;
        end = Math.min(numTotalHits, start + hitsPerPage);
      }
      
    }

  }
  
  
  

}