/**
 *  
 *  
 *   @Description    抓取文章内容并挖掘文章数据,如,时间,来源,出链接等相关信息,
 *   				  实现了Runnable接口,可并发抓取文章内容和挖掘信息
 *   @creator         tangkun
 *   @create-time     2011-7-15
 *   @revision        $Id$
 */

package com.scrawler.regex.template;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.tags.ScriptTag;
import org.htmlparser.tags.StyleTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Service;

import com.scrawler.mysql.dao.NewsContentSql;
import com.scrawler.util.HttpUtils;
import com.scrawler.vo.NewsContent;
import com.scrawler.vo.NewsContentConfig;

@Service()
// Prototype scope: a fresh instance per injection, so concurrent crawl tasks do not share state.
@Scope("prototype")
public class CrawlArticleContent implements Runnable {

	/** URL of the article to crawl and mine. Set via {@link #init}. */
	private String url;

	@Autowired
	private HttpUtils httpUtils;

	// NOTE(review): injected but never referenced in this class — confirm before removing.
	@Autowired
	private Parser parser;

	/** Per-site extraction configuration: field regexes, page encoding, type id. */
	private NewsContentConfig config;

	/**
	 * Initializes this task. Must be called before {@link #run()} is scheduled.
	 *
	 * @param url    the article URL to crawl
	 * @param config the extraction configuration for the article's site
	 */
	public void init(String url, NewsContentConfig config) {
		this.url = url;
		this.config = config;
	}

	/**
	 * Fetches the page at {@link #url}, applies each configured regex to extract the
	 * article fields (title, author, content, hits, publish time, comments), and
	 * populates a {@link NewsContent}. Failures are logged and the article is skipped;
	 * the thread never propagates the exception.
	 */
	@Override
	public void run() {
		NewsContent newsContent = new NewsContent();
		newsContent.setUrl(url);
		newsContent.setType_id(config.getTypeId());
		try {
			// Collapse all whitespace to single spaces so the field regexes
			// do not have to account for line breaks inside the HTML.
			String content = httpUtils.getContentByUrl(url, config.getEncoding())
					.replaceAll("\\s+", " ");

			newsContent.setTitle(extract(content, config.getTitle()));
			newsContent.setAuthor(extract(content, config.getAuthor()));
			newsContent.setContent(extract(content, config.getContent()));
			newsContent.setHits(extract(content, config.getHits()));
			newsContent.setPub_time(extract(content, config.getPub_time()));
			newsContent.setComments(extract(content, config.getComments()));
		} catch (Exception e) {
			// Best-effort crawl: record which URL failed and abandon this article.
			System.err.println("Failed to crawl article: " + url);
			e.printStackTrace();
			return;
		}
		//NewsContentSql.insert(newsContent);
	}

	/**
	 * Applies {@code regex} to {@code content} and returns capture group 1 of the
	 * <em>last</em> match (the original implementation kept overwriting earlier
	 * matches; that behavior is preserved).
	 *
	 * @param content whitespace-normalized page source
	 * @param regex   pattern with one capture group; may be {@code null} or empty
	 * @return the captured value, or {@code null} when the regex is blank, never
	 *         matches, or declares no capture group
	 */
	private static String extract(String content, String regex) {
		if (regex == null || regex.isEmpty()) {
			return null;
		}
		String value = null;
		Matcher matcher = Pattern.compile(regex).matcher(content);
		while (matcher.find()) {
			// Guard against misconfigured regexes without a capture group,
			// which previously aborted the whole article via group(1).
			if (matcher.groupCount() >= 1) {
				value = matcher.group(1);
			}
		}
		return value;
	}

	/*
	 * Ad-hoc manual test entry point (kept from the original; exercises nothing by default).
	 */
	public static void main(String[] args) {
		// System.out.println(CrawlIfeng.class.getClassLoader().getResource("/").getPath());
		// CrawlArticleContent page2 = new
		// CrawlArticleContent("http://finance.ifeng.com/money/special/gstz/20110531/4093842.shtml","geshui",2);
		// exec.execute(page);
		// exec.execute(page2);
	}
}
