package demo.org.apache.lucene.demo;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Vector;

import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.TermVector;

/** A utility for making Lucene Documents from a File. */

public class FileDocument {

  /** Charset used for all document and term-list files. */
  private static final Charset UTF8 = Charset.forName("UTF-8");

  /**
   * Makes a document for a File.
   *
   * <p>The file is read as UTF-8 text, lower-cased, and parsed for
   * <code>&lt;docno&gt;...&lt;/docno&gt;</code> and
   * <code>&lt;text&gt;...&lt;/text&gt;</code> sections. The resulting
   * document carries four fields:
   * <ul>
   *   <li><code>docid</code> — the DOCNO tag content; stored, not analyzed;
   *   <li><code>contents</code> — the TEXT tag content; analyzed with term
   *       vectors (positions + offsets), not stored;
   *   <li><code>contents_e</code> — the same TEXT content; stored, not
   *       analyzed, with term vectors;
   *   <li><code>path</code> — the absolute path with everything up to and
   *       including <code>mr.docs.2011/</code> stripped; stored, not analyzed.
   * </ul>
   *
   * <p>If the file is unreadable, empty, or a tag is missing, a (possibly
   * empty or partially populated) document is still returned — never null.
   *
   * @param f the file to index
   * @return the populated (or empty) document
   * @throws java.io.FileNotFoundException if the file cannot be opened
   */
  public static Document Document(File f)
       throws java.io.FileNotFoundException {

    // make a new, empty document
    Document doc = new Document();

    if (!f.canRead()) {
      return doc;
    }

    // Read the whole file into one space-joined string.
    // BUG FIX: the accumulator previously started as null, so the text was
    // prefixed with the literal string "null". Also reads as UTF-8 for
    // consistency with the term-list readers below (FileReader used the
    // platform default charset, which would mangle non-ASCII content).
    StringBuilder sb = new StringBuilder();
    BufferedReader br = new BufferedReader(
        new InputStreamReader(new FileInputStream(f), UTF8));
    try {
      String line;
      while ((line = br.readLine()) != null) {
        sb.append(' ').append(line);
      }
    } catch (IOException e) {
      // Best effort, as before: log and fall through with whatever was read.
      System.out.println("This File Couldnot be Indexed" + f.getAbsolutePath());
    } finally {
      try {
        br.close();  // BUG FIX: the reader leaked when readLine() threw
      } catch (IOException ignored) {
        // nothing useful to do on close failure
      }
    }
    if (sb.length() == 0) {
      return doc;  // nothing was read; mirror the old abc==null early return
    }

    // NOTE(review): toLowerCase() uses the default locale, as the original
    // code did; consider Locale.ROOT if locale-sensitive casing is a concern.
    String text = sb.toString().trim().toLowerCase();
    try {
      String docno = text.substring(text.indexOf("<docno>") + 7,
                                    text.indexOf("</docno>"));
      String cont = text.substring(text.indexOf("<text>") + 6,
                                   text.indexOf("</text>"));

      doc.add(new Field("docid", docno, Field.Store.YES,
          Field.Index.NOT_ANALYZED));
      doc.add(new Field("contents", cont, Field.Store.NO,
          Field.Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
      doc.add(new Field("contents_e", cont, Field.Store.YES,
          Field.Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
      // NOTE(review): assumes the absolute path contains "mr.docs.2011/"
      // (forward slash). Where it does not (e.g. Windows separators),
      // indexOf() returns -1 and the stored path is silently wrong — TODO
      // confirm the corpus layout before hardening this.
      String abs = f.getAbsolutePath();
      doc.add(new Field("path",
          abs.substring(abs.indexOf("mr.docs.2011/") + 13),
          Field.Store.YES, Field.Index.NOT_ANALYZED));
    } catch (StringIndexOutOfBoundsException e) {
      // A tag was missing: return whatever fields were added before the miss.
      return doc;
    }

    // return the document
    return doc;
  }

  /**
   * Makes a document from an already-loaded page string containing
   * <code>&lt;docno&gt;</code>, <code>&lt;title&gt;</code> and
   * <code>&lt;text&gt;</code> sections.
   *
   * <p>Unlike {@link #Document(File)}, the string is used verbatim (no
   * lower-casing), and a stored, analyzed <code>title</code> field is added.
   *
   * @param a the raw page text
   * @return the populated document; empty or partially populated if a tag
   *         is missing
   */
  public static Document Document(String a) {
    Document doc = new Document();
    try {
      String docno = a.substring(a.indexOf("<docno>") + 7, a.indexOf("</docno>"));
      String title = a.substring(a.indexOf("<title>") + 7, a.indexOf("</title>"));
      String cont = a.substring(a.indexOf("<text>") + 6, a.indexOf("</text>"));

      doc.add(new Field("docid", docno, Field.Store.YES,
          Field.Index.NOT_ANALYZED));
      doc.add(new Field("title", title, Field.Store.YES,
          Field.Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
      doc.add(new Field("contents", cont, Field.Store.NO,
          Field.Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
      doc.add(new Field("contents_e", cont, Field.Store.YES,
          Field.Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
    } catch (StringIndexOutOfBoundsException e) {
      // A tag was missing: return whatever fields were added before the miss.
      return doc;
    }
    return doc;
  }

  /**
   * Returns the entries of a UTF-8 term-list file (one term per line) that
   * occur in {@code content} as a run of 1 to 5 consecutive space-separated
   * words. Shared implementation of the NE and MWE matchers, which were
   * previously duplicated.
   *
   * @param listFileName path of the term-list file
   * @param content the text to scan
   * @return the matching terms (possibly empty), or null if the list file
   *         cannot be opened
   */
  private static String[] matchTermsFromList(String listFileName, String content) {
    BufferedReader br;
    try {
      br = new BufferedReader(new InputStreamReader(
          new FileInputStream(new File(listFileName)), UTF8));
    } catch (FileNotFoundException e) {
      // BUG FIX: the NE variant used to swallow this and then NPE on br.
      return null;
    }

    // Index every 1- to 5-gram of the content by its start offset.
    HashMap<String, Integer> ngrams = new HashMap<String, Integer>();
    String[] words = content.split(" ");
    for (int i = 0; i < words.length; i++) {
      String gram = words[i];
      ngrams.put(gram, i);
      for (int back = 1; back <= 4 && back <= i; back++) {
        gram = words[i - back] + " " + gram;
        ngrams.put(gram, i - back);
      }
    }

    Vector<String> matches = new Vector<String>();
    try {
      String term;
      while ((term = br.readLine()) != null) {
        if (ngrams.containsKey(term)) {
          matches.add(term);
        }
      }
    } catch (IOException e) {
      // Best effort, as before: keep the matches collected so far.
    } finally {
      try {
        br.close();  // BUG FIX: the reader was never closed
      } catch (IOException ignored) {
        // nothing useful to do on close failure
      }
    }
    return matches.toArray(new String[matches.size()]);
  }

  /**
   * Multi-word expressions from {@code MWE_mr.txt} found in the page content;
   * currently unused (all call sites are commented out).
   */
  private static String[] getIdentifiedMWEs(String pageContent) {
    return matchTermsFromList("MWE_mr.txt", pageContent);
  }

  /**
   * Named entities from {@code NER_mr.txt} found in the content; currently
   * unused (all call sites are commented out).
   */
  private static String[] getIdentifiedNEs(String content) {
    return matchTermsFromList("NER_mr.txt", content);
  }

  /** Utility class — not instantiable. */
  private FileDocument() {}
}
    
