/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.NooLab.nativebrowser.app.controller.parser;


import java.net.URL;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;



import org.NooLab.nativebrowser.app.config.BasicConfigurations;
import org.NooLab.utilities.strings.StringsUtil;
import org.NooLab.utilities.url.URLCanonicalizer;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;


/*
 * jsoup usage notes:
 *
 * replace a particular element with an empty <div>:
 *   element.replaceWith(new Element(Tag.valueOf("div"), ""));
 *
 * select an element by id (e.g. content from a wiki page):
 *   Element contentById = doc.select("#someId").first();
 */


/**
 * Parses an HTML document and delegates the extraction of texts, links,
 * divs and tables to dedicated extractor helpers; jsoup provides the DOM.
 */
public class HTMLParser implements HTMLParsedDocumentIntf {

	/** Upper limit for outgoing links collected from a page (configurable). */
	protected static final int MAX_OUT_LINKS = BasicConfigurations.getIntProperty( "fetcher.max_outlinks", 500);

	/** Placeholder protecting "</br>" line breaks from the whitespace normalization in parse(). */
	private static final String _CR_PROTECTION = "$$CR$$";

	Document soupdoc ;
	PageIntf page;

	// NOTE(review): text and title are never assigned in this class; the getters
	// return null unless an extractor or a subclass sets them — confirm intended.
	private String text;
	private String title;
	protected String htmlStr="";

	// we may run them in parallel... they are independent, and
	// each of them will create lists with array positions !!
	private TextsExtractor textsExtractor;
	private LinksExtractor linksExtractor;
	private DivExtractor   divExtractor;   // jsoup? detects beginning and end of a particular div
	private TableExtractor tableExtractor; // translates into internal untyped ArrayList<ArrayList>

	UrlDescriptors urlDescriptors ;

	protected StringsUtil strgutil = new StringsUtil();


	// ========================================================================
	/** Creates a parser backed by a fresh, empty Page. */
	public HTMLParser() {
		page = new Page();
		init();
	}

	/**
	 * Creates a parser for an already loaded page.
	 *
	 * @param page page whose HTML and context URL will be parsed
	 */
	public HTMLParser(PageIntf page) {
		this.page = page;
		init();
	}

	// ========================================================================

	/** Reserved for shared constructor setup; currently a no-op. */
	private void init() {
		//
	}

	/**
	 * Parses the given HTML and runs the div and link extractors; the
	 * resulting link descriptors are stored in {@link #urlDescriptors}.
	 *
	 * Example of the underlying jsoup API:
	 * <pre>
	 *   Document doc = Jsoup.connect("http://en.wikipedia.org/wiki/Boston").get();
	 *   Element contentDiv = doc.select("div[id=content]").first();
	 *   contentDiv.toString(); // the result
	 *   contentDiv.text();
	 * </pre>
	 *
	 * @param htmlContent raw HTML of the page
	 * @param contextURL  URL the page was fetched from, used to resolve relative links
	 */
	public void parse( String htmlContent, String contextURL) {

		htmlStr = htmlContent;

		// normalize: protect "</br>" line breaks, then strip blanks adjacent to tags
		htmlStr = strgutil.replaceAll(htmlStr, "</br>", _CR_PROTECTION );
		htmlStr = strgutil.replaceAll(htmlStr, "> ", ">");
		htmlStr = strgutil.replaceAll(htmlStr, " <", "<");

		// NOTE(review): the DOM is built from the ORIGINAL htmlContent, not from
		// the normalized htmlStr above — confirm whether that is intended.
		soupdoc = Jsoup.parse(htmlContent);

		divExtractor = new DivExtractor(this, soupdoc);
		divExtractor.parse();

		// NOTE(review): textsExtractor is created but its parse is never invoked
		// here — confirm whether it runs lazily or a call is missing.
		textsExtractor = new TextsExtractor(this, soupdoc);

		linksExtractor = new LinksExtractor(this, soupdoc);
		linksExtractor.parse(contextURL);

		// results are handed on to "ResultsDigester" via this field
		urlDescriptors = linksExtractor.getRichUrls();
	}

	/** @return extracted plain text, or null if no extractor has set it */
	public String getText() {
		return text;
	}

	/** @return extracted document title, or null if no extractor has set it */
	public String getTitle() {
		return title;
	}

	/** Convenience overload: parses the HTML and context URL of the configured page. */
	public void parse() {
		parse(page.getHTML(), page.getContextUrl());
	}


}
