package com.scrawler.regex.template;



import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.HasAttributeFilter;
import org.htmlparser.filters.NodeClassFilter;
import org.htmlparser.filters.OrFilter;
import org.htmlparser.filters.TagNameFilter;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Service;

import com.scrawler.html.template.SecondContentCrawlerPool;
import com.scrawler.main.InitProject;
import com.scrawler.tool.UrlFilter;
import com.scrawler.util.FileUtil;
import com.scrawler.util.HttpUtils;
import com.scrawler.vo.NewsContentConfig;
import com.scrawler.vo.WebConfig;

/**
 * Crawler task that parses a site's index (listing) page: downloads the page
 * content, applies the site's configured regex to extract candidate article
 * links, and normalizes relative links into absolute URLs using the site's
 * base URL.
 *
 * <p>Prototype-scoped because each instance carries per-task mutable state
 * set via {@link #init(WebConfig, String)} before being run.
 *
 * @author kuntang
 * @version 1.0
 * @since 2012-10-23
 * Copyright (c) 2012 sohu,Inc. All Rights Reserved.
 */
@Service()
// Not a singleton: each crawl task gets its own instance.
@Scope("prototype")
public class ParseIndexPageContent implements Runnable{

	@Autowired
	private HttpUtils httpUtils;
	
	// NOTE(review): injected but not referenced in this class — confirm before removing.
	@Autowired
	private Parser parser;
	
	
	/** Filter matching hyperlink ({@code <a>}) tags. */
	public static final NodeClassFilter linkFilter = new NodeClassFilter(LinkTag.class);
	
	/** Regex matching absolute http:// URLs (used to detect relative links). */
	public static final String URL_REGEXP_RULES = "^http://.*";
	
	/** Per-task site configuration (encoding, link regex, base URL). */
	private WebConfig config;
	/** Index page URL this task will download and parse. */
	private String url;
	
	/**
	 * Initializes per-task state. Must be called before {@link #run()}.
	 *
	 * @param config site crawl configuration
	 * @param url    index page URL to download and parse
	 */
	public void init(WebConfig config,String url){
		this.config = config;
		this.url = url;
	}
	
	@Override
	public void run() {
		String content = httpUtils.getContentByUrl(url, config.getEnconding());
		
		System.out.println("url........."+url);
		
		CopyOnWriteArrayList<String[]> links = parseUrl(content, config.getFilter());
		
		// TODO: dispatch the extracted links to the second-level content
		// crawler. The previous web_type 1/2 handling was disabled; see VCS
		// history for the removed commented-out implementation.
	}
	
	/**
	 * Extracts link data from the page content using the supplied regex, then
	 * rewrites relative links (capture group 1 of each match, stored at index
	 * 0) into absolute URLs by prefixing the configured base URL.
	 *
	 * @param content raw HTML of the index page
	 * @param filters regex whose capture groups identify the link parts
	 * @return one {@code String[]} (capture groups 1..n) per match, with the
	 *         URL element normalized to an absolute URL; never null
	 */
	private CopyOnWriteArrayList<String[]> parseUrl(String content,String filters){
		CopyOnWriteArrayList<String[]> links = new CopyOnWriteArrayList<String[]>();
		if(filters == null || filters.length()<1){
			// No regex configured for this site: nothing to extract.
			return links;
		}
		
		try {
			// Compile the validated parameter (previously re-read from
			// config.getFilter(), bypassing the null/empty check above).
			Pattern p = Pattern.compile(filters);
			Matcher m = p.matcher(content);
			System.out.println("group count: "+m.groupCount()+filters);
			while (m.find()) {
				// Collect capture groups 1..groupCount for this match.
				String values[] = new String[m.groupCount()];
				for(int i=0;i<values.length;i++){
					values[i] = m.group(i+1);
					System.out.println("group i:" + values[i]);
				}
				links.add(values);
			}
			System.out.println("validLinks size:"+links.size()+",");
			for(int i=0;i<links.size();i++){
				// BUGFIX: use a local instead of clobbering the instance
				// field `url`, and store the normalized absolute URL back
				// into the result (it was previously computed and discarded).
				String link = links.get(i)[0];
				if(!link.matches(URL_REGEXP_RULES)){
					// Relative path: prepend the site's base URL.
					link = config.getPreUrl()+link;
				}
				links.get(i)[0] = link;
				System.out.println(link+"===>"+link);
			}
		} catch(Exception e){
			// Bad regex or unexpected content: log and return what we have.
			e.printStackTrace();
		}
		return links;
	}

	
	/**
	 * Builds attribute filters from (name, value) pairs.
	 *
	 * @param names list of two-element arrays: [attributeName, attributeValue]
	 * @return one {@link HasAttributeFilter} per pair
	 */
	public NodeFilter[] createFilters(List<String[]> names){
		NodeFilter filters[] = new NodeFilter[names.size()];
		for(int i=0;i<names.size();i++){
			String pair[] = names.get(i);
			filters[i] = new HasAttributeFilter(pair[0], pair[1]);
		}
		return filters;
	}
	
	/**
	 * Builds a single attribute filter from a "name=value" expression.
	 *
	 * @param value attribute expression, e.g. {@code "class=news"}
	 * @return a one-element filter array, or null if value is null or has no '='
	 */
	public NodeFilter[] createFilters(String value){
		if(value == null)return null;
		String keyValue[] = value.split("=");
		// String.split never returns null; only the length needs checking.
		if(keyValue.length<2)return null;
		return new NodeFilter[]{ new HasAttributeFilter(keyValue[0], keyValue[1]) };
	}
	
	/**
	 * Builds tag-name filters from a comma-separated list of tag names.
	 *
	 * @param value comma-separated tag names, e.g. {@code "div,span"}
	 * @return one {@link TagNameFilter} per tag name, or null when value is null
	 */
	public NodeFilter[] createFiltersByTagName(String value){
		if(value == null)return null;
		String tags[] = value.split(",");
		NodeFilter[] filters = new NodeFilter[tags.length];
		for(int i=0;i<filters.length;i++){
			filters[i] = new TagNameFilter(tags[i]);
		}
		return filters;
	}
	
}
