package arbor.mining.util.net;

import java.util.Properties;
import java.util.StringTokenizer;

import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.NodeClassFilter;
import org.htmlparser.filters.TagNameFilter;
import org.htmlparser.lexer.Page;
import org.htmlparser.parserapplications.LinkExtractor;
import org.htmlparser.parserapplications.StringExtractor;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

import arbor.mining.util.UtilItem;
import arbor.mining.util.cache.FileStringCache;
import arbor.mining.util.cache.StringCache;

/**
 * WebStringConn retrieves String information with the help of htmlparser.
 * For plain HTML pages, it is suggested to use WebPlainConn instead.
 * WebStringConn uses a StringCache to reduce unnecessary re-connections.
 * Turn off the cache if you need to force a re-connection.
 * 
 * @author doug
 * 
 */
public class WebStringConn extends UtilItem {

	/** When true, every downloaded body (and a missing-cache warning) is logged. */
	public static boolean Web_Connect_DEBUG = false;

	/** Backing cache; {@code null} means every call hits the network. */
	StringCache cache = null;

	/**
	 * Creates a connector backed by the given cache.
	 *
	 * @param cache cache used to skip re-downloads, or {@code null} to
	 *              force a fresh connection on every request
	 */
	public WebStringConn(StringCache cache) {
		super();
		this.cache = cache;
		if (Web_Connect_DEBUG && cache == null) {
			logger.warn("No cache used!!");
		}
	}

	@Override
	public Properties getInitProperty() {
		Properties p = new Properties();
		// Upper bound (milliseconds) of the random anti-DOS delay before a request.
		p.put("Sleep_Time", "3000");
		return p;
	}

	/**
	 * Derives a cache file name from a URL: strip the scheme, flatten the
	 * path separators, append a per-method suffix.
	 */
	private String cacheKey(String url, String suffix) {
		return url.replaceFirst("http://", "").replaceAll("/", "_") + suffix;
	}

	/** Returns the cached content for {@code key}, or {@code null} if caching is off or it is absent. */
	private String lookupCache(String key) {
		return (this.cache == null) ? null : cache.getDataFromCache(key);
	}

	/**
	 * Stores {@code content} under {@code key}. A {@code null} content
	 * (failed download) is NOT cached, so the next call retries the fetch.
	 */
	private void storeCache(String key, String content) {
		if (cache != null && content != null) {
			cache.insert2Hash(key, content);
		}
	}

	/**
	 * Sleeps a random interval in [0, Sleep_Time) milliseconds. Since we may
	 * repeatedly connect to a server and face its DOS prevention, we access
	 * pages after a random delay.
	 */
	private void randomSleep() {
		try {
			Thread.sleep((long) (Math.random() * Integer.parseInt(prop.getProperty("Sleep_Time"))));
		} catch (InterruptedException e) {
			// Restore the interrupt status instead of swallowing it.
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Returns the HTML of every link tag on the page, one tag per line.
	 *
	 * @param url page to scan for links
	 * @return concatenated {@code <a>} tag HTML, or {@code null} on parse failure
	 */
	public String getHTML_by_LinkExtractor(String url) {
		String fileName = cacheKey(url, ".le");
		String content = lookupCache(fileName);
		if (content == null) {
			randomSleep();
			NodeFilter filter = new NodeClassFilter(LinkTag.class);
			try {
				Parser parser = new Parser(url);
				NodeList list = parser.extractAllNodesThatMatch(filter);
				StringBuilder buf = new StringBuilder();
				for (int i = 0; i < list.size(); i++) {
					buf.append(list.elementAt(i).toHtml()).append("\n");
				}
				content = buf.toString();
				if (Web_Connect_DEBUG)
					logger.info(content);
			} catch (ParserException e) {
				logger.error(e);
			}
			storeCache(fileName, content);
		}
		return content;
	}

	/**
	 * The goal of getAllTableContent() is to retrieve table content.
	 * For ease of integration with our cache system, we report "Stringized"
	 * table content where each data entry is on its own line and, when
	 * {@code needLineID} is set, a line is represented as "line id,content".
	 * Please parse the content yourself.
	 *
	 * @param url        page containing the table(s)
	 * @param isBig5     if the content must be re-decoded as Big5
	 * @param needLineID if each line should be prefixed with its index and a comma
	 * @return the stringized table content (possibly empty on parse failure)
	 */
	public String getAllTableContent(String url, boolean isBig5, boolean needLineID) {
		String fileName = cacheKey(url, ".table");
		String content = lookupCache(fileName);
		if (content != null) {
			return content;
		}
		NodeList list = null;
		try {
			Parser parser = new Parser();
			parser.setURL(url);
			// Table cells carry the TD tag.
			list = parser.extractAllNodesThatMatch(new TagNameFilter("TD"));
		} catch (ParserException e) {
			logger.error(e);
		}
		StringBuilder buf = new StringBuilder();
		// Guard against a failed parse: the original fell through and NPE'd here.
		if (list != null) {
			try {
				for (int i = 0; i < list.size(); i++) {
					String text = list.elementAt(i).toPlainTextString();
					if (isBig5) {
						// The parser read Big5 bytes as ISO-8859-1; re-decode them.
						text = new String(text.getBytes("ISO-8859-1"), "Big5");
					}
					if (needLineID) {
						buf.append(i).append(',');
					}
					buf.append(text).append('\n');
				}
			} catch (Exception e) {
				logger.error(e);
			}
		}
		storeCache(fileName, buf.toString());
		return buf.toString();
	}

	/**
	 * Returns the parse-tree dump of every node matching {@code tag}.
	 * Redirection and cookie processing are enabled for the connection.
	 *
	 * @param url page to fetch
	 * @param tag tag name to match (e.g. "script")
	 * @return {@code NodeList.toString()} of the matches, or {@code null} on failure
	 */
	public String getHTML_by_Parser(String url, String tag) {
		String fileName = cacheKey(url, ".tag_" + tag);
		String content = lookupCache(fileName);
		if (content == null) {
			randomSleep();
			try {
				Parser parser = new Parser();
				NodeFilter filter = new TagNameFilter(tag);
				Page.getConnectionManager().setRedirectionProcessingEnabled(true);
				Page.getConnectionManager().setCookieProcessingEnabled(true);
				parser.setResource(url);
				content = parser.parse(filter).toString();
				if (Web_Connect_DEBUG)
					logger.info(content);
			} catch (ParserException e) {
				logger.error(e);
			}
			storeCache(fileName, content);
		}
		return content;
	}

	/**
	 * Returns the plain-text strings of the page (links included) via
	 * htmlparser's {@link StringExtractor}.
	 *
	 * @param url page to fetch
	 * @return extracted text, or {@code null} on parse failure
	 */
	public String getHTML_by_StringExtractor(String url) {
		String fileName = cacheKey(url, ".se");
		String content = lookupCache(fileName);
		if (content == null) {
			randomSleep();
			StringExtractor se = new StringExtractor(url);
			try {
				content = se.extractStrings(true);
			} catch (ParserException e) {
				logger.error(e);
			}
			if (Web_Connect_DEBUG)
				logger.info(content);
			storeCache(fileName, content);
		}
		return content;
	}

	/** Smoke test: dump the script tags of a sample page through a file cache. */
	public static void main(String[] args) {
		// String url =
		// "http://travel.yahoo.com/p-travelguide-2735143-alp-z-b-61-new_york_city_restaurants-i;_ylt=AougvpnV3_cA6vd3ncdPrN.cFmoL";
		String url = "http://travel.yahoo.com/p-travelguide-2735143-alp-z-b-11-new_york_city_restaurants-i-sort-pop";
		// HTMLConnector connector = new HTMLConnector(StringCache.NoCache);
		WebStringConn connector = new WebStringConn(new FileStringCache());
		System.out.println(connector.getHTML_by_Parser(url, "script"));
	}
}
