package com.clustertech.crawler.utils;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.HasAttributeFilter;
import org.htmlparser.filters.LinkRegexFilter;
import org.htmlparser.filters.TagNameFilter;
import org.htmlparser.lexer.Lexer;
import org.htmlparser.lexer.Page;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

/**
 * Static utility methods for crawling Sina news pages and extracting the
 * title, publication timestamp, article body, and outgoing article links.
 *
 * <p>All extraction methods take the raw HTML of a page (decoded with
 * {@link #ENCODING}) and return {@code ""} / an empty map on parse failure
 * rather than throwing. Not thread-safe concerns: all state is local, so the
 * methods are safe to call concurrently.
 */
public class SinaNewsExtractor {
	
	/** Loose date pattern, e.g. matches "2013年05月21日 10:30" → groups (yyyy)(MM)(dd)(HH:mm). */
	public static final String DATAPATTERN = "(\\d{4}).*(\\d{2}).*(\\d{2}).*(\\d{2}:\\d{2})";
	/** Shape of Sina world-news article URLs worth following. */
	public static final String URLPATTERN = "http://news\\.sina\\.com\\.cn/w/\\d{4}-\\d{2}-\\d{2}/\\d+\\.shtml";
	/** Charset Sina serves these pages in. */
	public static final String ENCODING = "GB2312";
	
	// Compile once: Pattern is immutable and thread-safe, so there is no need
	// to recompile it on every getTimeStamp() call.
	private static final Pattern DATE_PATTERN = Pattern.compile(DATAPATTERN);
	
	// Length of the fixed site suffix appended to every <title>
	// (assumes a constant 9-char trailer like "_新浪网..." — TODO confirm against live pages).
	private static final int TITLE_SUFFIX_LENGTH = 9;
	
	/**
	 * Extracts the page title, stripping the trailing 9-character site suffix.
	 *
	 * @param html raw page HTML
	 * @return the cleaned title of the last {@code <title>} tag found, or "" if
	 *         none is present or parsing fails
	 */
	public static String getTitle(String html) {
		Page page = new Page(html, ENCODING);
		String title = "";
		Parser parser = new Parser(new Lexer(page));
		NodeFilter titleFilter = new TagNameFilter("title");
		
		try {
			NodeList nodes = parser.extractAllNodesThatMatch(titleFilter);
			for (int i = 0; i < nodes.size(); i++) {
				title = nodes.elementAt(i).toPlainTextString();
				// Guard: the original unconditional substring threw
				// StringIndexOutOfBoundsException on titles shorter than the suffix.
				if (title.length() >= TITLE_SUFFIX_LENGTH) {
					title = title.substring(0, title.length() - TITLE_SUFFIX_LENGTH);
				}
			}
		} catch (ParserException e) {
			e.printStackTrace();
		}
		return title;
	}
	
	/**
	 * Extracts the publication timestamp from the element with id "pub_date".
	 *
	 * @param html raw page HTML
	 * @return timestamp formatted as "yyyy-MM-dd HH:mm", or "" if not found
	 */
	public static String getTimeStamp(String html) {
		Page page = new Page(html, ENCODING);
		String timeStamp = "";
		Parser parser = new Parser(new Lexer(page));
		NodeFilter dateFilter = new HasAttributeFilter("id", "pub_date");
		try {
			NodeList nodes = parser.extractAllNodesThatMatch(dateFilter);
			for (int i = 0; i < nodes.size(); i++) {
				String date = nodes.elementAt(i).toPlainTextString();
				Matcher m = DATE_PATTERN.matcher(date);
				if (m.find()) {
					timeStamp = m.group(1) + "-" + m.group(2) + "-" + m.group(3) + " " + m.group(4);
				}
			}
		} catch (ParserException e) {
			e.printStackTrace();
		}
		return timeStamp;
	}
	
	/**
	 * Extracts the article body: the plain text of all {@code <p>} elements
	 * inside the element with id "artibody", with spaces removed.
	 *
	 * @param html raw page HTML
	 * @return concatenated paragraph text, or "" if the body is absent
	 */
	public static String getBody(String html) {
		Page page = new Page(html, ENCODING);
		Parser parser = new Parser(new Lexer(page));
		NodeFilter bodyFilter = new HasAttributeFilter("id", "artibody");
		// StringBuilder instead of String += — avoids O(n^2) reallocation
		// across many paragraphs.
		StringBuilder body = new StringBuilder();
		try {
			NodeList nodes = parser.extractAllNodesThatMatch(bodyFilter);
			for (int i = 0; i < nodes.size(); i++) {
				Node node = nodes.elementAt(i);
				// Re-parse just this subtree to pick out its <p> children.
				Parser subParser = new Parser(new Lexer(node.toHtml()));
				NodeFilter pFilter = new TagNameFilter("p");
				NodeList subs = subParser.extractAllNodesThatMatch(pFilter);
				for (int j = 0; j < subs.size(); j++) {
					String pStr = subs.elementAt(j).toPlainTextString();
					// Paragraphs spanning >2 lines carry embedded boilerplate
					// between the first and last line; keep only the two ends.
					// (NOTE(review): presumably drops inline ad/photo captions — verify.)
					String[] segments = pStr.split("\n");
					if (segments.length > 2) {
						pStr = segments[0] + segments[segments.length - 1];
					}
					body.append(pStr.replace(" ", ""));
				}
			}
		} catch (ParserException e) {
			e.printStackTrace();
		}
		return body.toString();
	}
	
	/**
	 * Collects all hyperlinks matching {@link #URLPATTERN} from the page.
	 *
	 * @param html raw page HTML
	 * @return map whose keys and values are both the URL string (used as a
	 *         de-duplicated set by callers); empty on parse failure
	 */
	public static Map<String, String> getURLs(String html) {
		Map<String, String> urls = new HashMap<String, String>();
		Page page = new Page(html, ENCODING);
		Parser parser = new Parser(new Lexer(page));
		NodeFilter filter = new LinkRegexFilter(URLPATTERN);
		
		try {
			NodeList nodes = parser.extractAllNodesThatMatch(filter);
			for (int i = 0; i < nodes.size(); i++) {
				LinkTag node = (LinkTag) nodes.elementAt(i);
				String url = node.getAttribute("href");
				// put() alone suffices: key == value, so overwriting a
				// duplicate is a no-op (the original containsKey check was redundant).
				urls.put(url, url);
			}
		} catch (ParserException e) {
			e.printStackTrace();
		}
		return urls;
	}
	
	/**
	 * Downloads a page over HTTP and returns its content decoded as
	 * {@link #ENCODING}, with lines joined by '\n'.
	 *
	 * @param link absolute URL to fetch
	 * @return the page body, or "" if the request fails
	 */
	public static String crawlPage(String link) {
		StringBuilder html = new StringBuilder();
		HttpURLConnection connection = null;
		try {
			URL url = new URL(link);
			connection = (HttpURLConnection) url.openConnection();
			connection.connect();
			// try-with-resources closes reader and underlying stream on every
			// path. The original finally block called is.close() even when the
			// connect failed and `is` was still null (guaranteed NPE), and never
			// closed the reader or disconnected the connection.
			try (BufferedReader reader = new BufferedReader(
					new InputStreamReader(connection.getInputStream(), ENCODING))) {
				String line;
				while ((line = reader.readLine()) != null) {
					html.append(line).append('\n');
				}
			}
		} catch (IOException e) {
			// MalformedURLException is an IOException, so one catch covers both
			// original handlers; best-effort crawl keeps the swallow-and-log style.
			e.printStackTrace();
		} finally {
			if (connection != null) {
				connection.disconnect();
			}
		}
		return html.toString();
	}
	
}
