package _Trash;


import java.io.File;
import java.io.IOException;
import java.util.ArrayList;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.apache.lucene.document.Field;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;

public class NTParser {

	/**
	 * One Lucene Document per verse. The list is accumulated across calls to
	 * {@link #parse(String)} so several files can be indexed into the same
	 * list; it is deliberately not cleared between calls.
	 * NOTE(review): kept as a public mutable field for compatibility with
	 * existing callers.
	 */
	public ArrayList<org.apache.lucene.document.Document> allVerses = new ArrayList<org.apache.lucene.document.Document>();

	/** When true, {@link #DBG(String)} echoes trace messages to stdout. */
	private boolean dbg = false;

	/**
	 * Parses an XML bible file and turns every {@code <v>} (verse) element
	 * into a Lucene Document carrying the verse text plus its chapter and
	 * book context fields.
	 *
	 * Expected element structure (assumed from the tag names used below --
	 * TODO confirm against the actual DTD):
	 * root &gt; bookcoll &gt; book &gt; (bktlong, bktshort, chapter &gt; (chtitle, v*))
	 *
	 * @param fileName path of the XML file to parse
	 * @return the shared {@link #allVerses} list (also mutated in place);
	 *         returned unchanged when the file cannot be parsed
	 */
	public ArrayList<org.apache.lucene.document.Document> parse(String fileName) {
		try {
			DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
			// Harden against XXE: this method parses arbitrary user-supplied
			// files, so never resolve external entities.
			docBuilderFactory.setFeature("http://xml.org/sax/features/external-general-entities", false);
			docBuilderFactory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
			DocumentBuilder docBuilder = docBuilderFactory.newDocumentBuilder();
			Document doc = docBuilder.parse(new File(fileName));

			// Merge adjacent text nodes so getTextContent() returns whole strings.
			doc.getDocumentElement().normalize();
			System.out.println("Root element of the doc is "
					+ doc.getDocumentElement().getNodeName());

			Element tstmt = doc.getDocumentElement();
			NodeList bookcolls = tstmt.getElementsByTagName("bookcoll");
			if (bookcolls.getLength() == 0) {
				// Some documents (e.g. the Quran files) contain no <bookcoll>;
				// nothing can be indexed until a virtual book is synthesized.
				System.out.println("virtualize book here");
				return allVerses;
			}

			// Assume exactly one <bookcoll> per document.
			Element firstBookcoll = (Element) bookcolls.item(0);
			NodeList books = firstBookcoll.getElementsByTagName("book");

			for (int i = 0; i < books.getLength(); i++) {
				DBG("books...");
				Element currentBook = (Element) books.item(i);

				// Long and short book titles; tolerate a missing element
				// instead of throwing NullPointerException.
				String firstBktLongString = firstText(currentBook, "bktlong");
				DBG("bktLong " + firstBktLongString);
				String firstBktShortString = firstText(currentBook, "bktshort");
				DBG("\tbktShort" + firstBktShortString);

				NodeList chapters = currentBook.getElementsByTagName("chapter");
				for (int k = 0; k < chapters.getLength(); k++) {
					Element chapter = (Element) chapters.item(k);
					String firstChTitleString = firstText(chapter, "chtitle"); // assume 1 chtitle
					DBG("\t\tchTitle >> " + firstChTitleString);

					// One Lucene Document per <v> element.
					NodeList verses = chapter.getElementsByTagName("v");
					for (int z = 0; z < verses.getLength(); z++) {
						String vContents = verses.item(z).getTextContent();
						DBG("\t\t\tv" + vContents);

						org.apache.lucene.document.Document currentDocument = new org.apache.lucene.document.Document();
						currentDocument.add(new Field("verseContents", vContents, Field.Store.YES, Field.Index.TOKENIZED));
						currentDocument.add(new Field("chtitleContents", firstChTitleString, Field.Store.YES, Field.Index.TOKENIZED));
						// Numeric IDs are stored verbatim, never tokenized
						// (tokenizing them throws at index time).
						currentDocument.add(new Field("chapterID", Integer.toString(k), Field.Store.YES, Field.Index.NO));
						currentDocument.add(new Field("bktshortContents", firstBktShortString, Field.Store.YES, Field.Index.TOKENIZED));
						currentDocument.add(new Field("bktlongContents", firstBktLongString, Field.Store.YES, Field.Index.TOKENIZED));
						currentDocument.add(new Field("bookID", Integer.toString(i), Field.Store.YES, Field.Index.NO));
						allVerses.add(currentDocument);
					}
				}
			}

		} catch (SAXParseException err) {
			System.out.println("** Parsing error" + ", line "
					+ err.getLineNumber() + ", uri " + err.getSystemId());
			System.out.println(" " + err.getMessage());

		} catch (SAXException e) {
			// Report the wrapped exception when the SAX layer provides one.
			Exception x = e.getException();
			((x == null) ? e : x).printStackTrace();

		} catch (ParserConfigurationException e) {
			e.printStackTrace();

		} catch (IOException e) {
			// Unreadable/missing file; report and return what we have so far.
			e.printStackTrace();
		}

		return allVerses;
	}//end of parse()

	/**
	 * Text content of the first descendant of {@code parent} with the given
	 * tag name, or the empty string when no such element exists.
	 */
	private String firstText(Element parent, String tag) {
		Node first = parent.getElementsByTagName(tag).item(0);
		return (first == null) ? "" : first.getTextContent();
	}

	/**
	 * Prints a trace message to stdout, but only when the {@link #dbg}
	 * flag is enabled.
	 *
	 * @param msg message to print
	 */
	public void DBG(String msg) {
		if (dbg) {
			System.out.println("DBG: " + msg);
		}
	}

}