package com.clustertech.crawler.analyzer;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.HasAttributeFilter;
import org.htmlparser.filters.LinkRegexFilter;
import org.htmlparser.filters.TagNameFilter;
import org.htmlparser.lexer.Lexer;
import org.htmlparser.lexer.Page;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

import com.clustertech.crawler.domain.NewsFeed;
import com.clustertech.crawler.utils.SinaNewsExtractor;

/**
 * Analyzer for Sina news pages.
 *
 * <p>Delegates the actual field extraction to {@link SinaNewsExtractor} and
 * assembles the results into a {@link NewsFeed} value object.
 */
public class SinaNewsAnalyzer extends ClusterAnalyzer {

	/**
	 * Builds a {@link NewsFeed} from the raw HTML of a Sina news page.
	 *
	 * @param html raw page HTML to analyze
	 * @return a {@code NewsFeed} populated with the extracted title,
	 *         timestamp and body
	 */
	@Override
	public NewsFeed getNewsFeed(String html) {
		// Extract all three fields first, in the same order as before,
		// then package them into the feed object.
		String extractedTitle = SinaNewsExtractor.getTitle(html);
		String extractedTime = SinaNewsExtractor.getTimeStamp(html);
		String extractedBody = SinaNewsExtractor.getBody(html);

		NewsFeed feed = new NewsFeed();
		feed.setTitle(extractedTitle);
		feed.setTimeStamp(extractedTime);
		feed.setBody(extractedBody);
		return feed;
	}

}
