package tools;

import java.io.File;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Date;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import rainbownlp.core.Artifact;
import rainbownlp.util.SystemUtil;
import transientTables.TermsTFIDF;
import pitt.search.semanticvectors.BuildIndex;

/**
 * Builds a Lucene (3.x) full-text index over sentence artifacts and derives
 * per-term TF-IDF statistics into the {@code TermsTFIDF} table.
 *
 * <p>Pipeline (see {@link #main}): create the index from all sentences, build a
 * semantic-vectors index on top of it, then compute raw frequencies and TF-IDF
 * scores in two passes over the index terms.</p>
 */
public class LuceneManager {
	// Filesystem path of the Lucene index, read from the "LuceneIndexFile" config key.
	String luceneIndexPath;
	// Directory handle over luceneIndexPath (re-opened again in calculateTermsTFIDFs).
	Directory dir;

	/**
	 * Entry point: (re)builds the index, then computes TF-IDF for every term.
	 *
	 * @throws IOException  if the index directory cannot be opened/read
	 * @throws SQLException if writing to the TermsTFIDF table fails
	 */
	public static void main(String[] args) throws IOException, SQLException
	{
		LuceneManager sv = new LuceneManager();
		sv.createIndex();
		sv.calculateTermsTFIDFs();
	}

	/**
	 * Opens the index directory configured under the "LuceneIndexFile" key.
	 *
	 * @throws IOException if the directory cannot be opened
	 */
	public LuceneManager() throws IOException
	{
		luceneIndexPath = Configuration.getValue("LuceneIndexFile");
		dir = FSDirectory.open(new File(luceneIndexPath));
	}

	/**
	 * Rebuilds the Lucene index from scratch over all sentence artifacts, then
	 * invokes the semantic-vectors {@code BuildIndex} tool on the result.
	 *
	 * <p>IOExceptions are caught and reported to stdout (best-effort behavior
	 * preserved from the original); SQLExceptions propagate to the caller.</p>
	 *
	 * @throws SQLException if reading sentences from the database fails
	 */
	public void createIndex() throws SQLException
	{
		Date start = new Date();
	    try {
	      System.out.println("Indexing to directory '" + luceneIndexPath + "'...");

	      Analyzer analyzer =
	    	  new StandardAnalyzer(Version.LUCENE_31);
	      IndexWriterConfig iwc =
	    	  new IndexWriterConfig(Version.LUCENE_31, analyzer);

	      // Create a new index in the directory, removing any previously
	      // indexed documents.
	      iwc.setOpenMode(OpenMode.CREATE);

	      // Optional: for better bulk-indexing performance, raise the RAM
	      // buffer (and the JVM heap, e.g. -Xmx1g):
	      // iwc.setRAMBufferSizeMB(256.0);

	      IndexWriter writer = new IndexWriter(dir, iwc);
	      try {
	        indexSentences(writer);
	      } finally {
	        // FIX: always close the writer so the index write lock is released
	        // even when indexSentences throws.
	        writer.close();
	      }

	      // NOTE: writer.optimize() could be called before close() to maximize
	      // search performance; it is costly and only worthwhile for a static index.

	      Date end = new Date();
	      System.out.println(end.getTime() - start.getTime() + " total milliseconds");

	      // Build the semantic-vectors index on top of the fresh Lucene index.
	      BuildIndex.main(new String[]{luceneIndexPath});

	    } catch (IOException e) {
	      System.out.println(" caught a " + e.getClass() +
	       "\n with message: " + e.getMessage());
	    }
	}

	/**
	 * Adds one Lucene document per sentence artifact to {@code writer}.
	 *
	 * <p>Each document stores the artifact's file path under "filename"
	 * (stored, not indexed) and a lightly normalized lowercase version of the
	 * sentence text under "contents" (analyzed, stored, with term vectors).</p>
	 *
	 * @param writer open index writer in CREATE mode
	 * @throws IOException  on index write failure
	 * @throws SQLException if listing sentence artifacts fails
	 */
	private void indexSentences(IndexWriter writer)
		    throws IOException, SQLException {

      List<Artifact> sentences = Artifact.listByType(Artifact.Type.Sentence);

     for (Artifact sent:sentences)
      {
    	  // A new, empty document for each sentence.
          Document doc = new Document();

          String doc_content = sent.getContent();
		  String filename = sent.getAssociatedFilePath();

	      // Store the file path as "filename" without tokenizing/indexing it;
	      // term frequency and positional info are omitted as well.
	      Field pathField = new Field("filename", filename,
	    		  Field.Store.YES, Field.Index.NO);
	      pathField.setOmitTermFreqAndPositions(true);
	      doc.add(pathField);

	      // Index the sentence text under "contents": lowercased, digits and
	      // simple punctuation (. , : 's) stripped, underscores and runs of
	      // whitespace collapsed to single spaces. Term vectors with positions
	      // and offsets are kept for downstream semantic-vector building.
	      doc.add(new Field("contents",
	    		  doc_content.toLowerCase().replaceAll("(\\d)+", "").replaceAll("(\\.|\\,|\\:|\\'s)+", " ").
	    		  	replace("_", " ").replaceAll("\\s+", " "),
	    		  Field.Store.YES, Field.Index.ANALYZED,
	    		  TermVector.WITH_POSITIONS_OFFSETS));

	       if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
	        // Fresh index, so we just add the document (no stale duplicate exists).
	        System.out.println("adding :" + filename);
	        writer.addDocument(doc);
	      }
      }
    }

	/**
	 * Computes TF-IDF statistics for every term in the index.
	 *
	 * <p>Three passes over the index terms: (1) count terms for the progress
	 * report, (2) insert raw per-(term, document) frequencies into
	 * {@code TermsTFIDF}, (3) compute and store normalized TF-IDF scores.</p>
	 *
	 * @throws CorruptIndexException if the index is corrupt
	 * @throws IOException           on index read failure
	 * @throws SQLException          on TermsTFIDF read/write failure
	 */
	public void calculateTermsTFIDFs() throws CorruptIndexException, IOException, SQLException
	{
		dir = FSDirectory.open(new File(luceneIndexPath));
		IndexReader indexReader =
			 IndexReader.open(dir);
		try {
			// Pass 1: count the terms (progress reporting only).
			int tc = 0;
			TermEnum terms = indexReader.terms();
			try {
				while (terms.next()) {
					tc++;
				}
			} finally {
				// FIX: release the term enumeration (was leaked).
				terms.close();
			}
			System.out.println("There are " +
					tc +
					" terms (and " + indexReader.numDocs() + " docs)");

			// Pass 2: record raw term frequency and document frequency for each
			// (term, document) pair.
			tc = 0;
			terms = indexReader.terms();
			try {
				while (terms.next()) {
					// Output progress counter.
					if (( tc % 50 == 0 ) || ( tc < 50 && tc % 10 == 0 )) {
						System.out.println("Processed " + tc + " terms ... ");
					}
					tc++;
					Term term = terms.term();

					int doc_f = indexReader.docFreq(term);

					TermDocs tDocs = indexReader.termDocs(term);
					try {
						while (tDocs.next()) {
							Integer docID = tDocs.doc();
							Document doc = indexReader.document(docID);
							String filename = doc.get("filename");
							int freq = tDocs.freq();
							TermsTFIDF.insertTerm(filename, term.text(), 0,
									freq, doc_f);
						}
					} finally {
						// FIX: release the postings iterator (was leaked).
						tDocs.close();
					}
				}
			} finally {
				terms.close();
			}

			// Pass 3: compute and persist TF-IDF from the now-complete raw
			// frequency table (pass 2 must finish first so that the per-file
			// max term frequency used for normalization is final).
			tc = 0;
			terms = indexReader.terms();
			try {
				while (terms.next()) {
					// Output progress counter.
					if (( tc % 50 == 0 ) || ( tc < 50 && tc % 10 == 0 )) {
						System.out.println("Calculated TFIDF " + tc + " terms ... ");
					}
					tc++;

					Term term = terms.term();
					int doc_f = indexReader.docFreq(term);

					TermDocs tDocs = indexReader.termDocs(term);
					try {
						while (tDocs.next()) {
							Integer docID = tDocs.doc();
							Document doc = indexReader.document(docID);
							String filename = doc.get("filename");
							int term_freq = tDocs.freq();
							double normalized_term_freq = normalizeTermFreq(filename, term_freq);
							double tfidf = getTFIDF(normalized_term_freq,
									doc_f, indexReader.numDocs());
							TermsTFIDF.updateTFIDF(filename, term.text(), tfidf);
						}
					} finally {
						tDocs.close();
					}
				}
			} finally {
				terms.close();
			}
		} finally {
			// FIX: close the reader (was leaked).
			indexReader.close();
		}
	}

	/**
	 * Normalizes a raw term frequency by the maximum term frequency observed
	 * in the same file.
	 *
	 * @param filename  file whose max term frequency is looked up in TermsTFIDF
	 * @param term_freq raw frequency of the term in that file
	 * @return term_freq / maxFreq, or 0.0 when no max frequency is recorded
	 * @throws SQLException if the lookup fails
	 */
	private double normalizeTermFreq(String filename, int term_freq) throws SQLException {
		int maxFreq = TermsTFIDF.getMaxTermFreq(filename);
		// FIX: guard against maxFreq == 0 (no rows recorded for this file),
		// which would otherwise produce Infinity/NaN.
		if (maxFreq <= 0) {
			return 0.0;
		}
		double normalized_freq = (double)term_freq / (double)maxFreq;
		return normalized_freq;
	}

	/**
	 * Classic TF-IDF: tf * ln(N / df).
	 *
	 * @param termFrequency (normalized) term frequency within one document
	 * @param docFrequency  number of documents containing the term (assumed > 0
	 *                      since the term comes from the index)
	 * @param totalDocs     total number of documents in the index
	 * @return the TF-IDF weight
	 */
	double getTFIDF(double termFrequency, int docFrequency, int totalDocs)
	{
		// FIX: cast before dividing — the original int/int division truncated
		// the IDF ratio (e.g. 10/3 -> 3, and any df > N/2 collapsed to ln(1) = 0).
		return termFrequency * Math.log((double) totalDocs / docFrequency);
	}

	/**
	 * Okapi BM25 weight with k1 = 2 and b = 0.75 (so the numerator factor is
	 * tf * (k1 + 1) = tf * 3).
	 *
	 * @param termFrequency    raw term frequency in the document
	 * @param docFrequency     number of documents containing the term
	 * @param totalDocs        total number of documents in the index
	 * @param docLength        length of the document
	 * @param docAverageLength average document length over the collection
	 * @return the BM25 weight
	 */
	double getOkapiBM25(int termFrequency, int docFrequency, int totalDocs,
			int docLength, double docAverageLength)
	{
		return ((termFrequency*3)/(termFrequency+2*(1-0.75+0.75*docLength/docAverageLength)))*
		Math.log((totalDocs - docFrequency + 0.5) / (docFrequency + 0.5));
	}

}
