package com.scrawler.html.template;



import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.NodeClassFilter;
import org.htmlparser.filters.OrFilter;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

import com.scrawler.udt.HTag;
import com.scrawler.util.HtmlParseUtils;
import com.scrawler.util.HttpUtils;

/**  
 * @Description: Crawls Tieba-style forum pages and extracts the sets of valid and
 *               bold (emphasized) link URLs from the matched content regions.
 * @author kuntang    
 * @version 1.0  
 * @created 2012-10-23 11:11:21 AM
 * Copyright (c) 2012 sohu,Inc. All Rights Reserved.
 */

public class CrawlTiebaContentBase {

	private HttpUtils httpUtils = new HttpUtils();

	// NOTE(review): Parser keeps internal state between setInputHTML() calls,
	// so this class is not thread-safe. Confirm callers use one instance per thread.
	private Parser parser = new Parser();

	private HtmlParseUtils htmlUtils = new HtmlParseUtils();

	/*------------ OR filter: keeps hyperlinks and bold/heading (HTag) regions -----------------------*/
	private final static OrFilter fontOrLinkFilter = new OrFilter(new NodeFilter[]{
			// new HasAttributeFilter("class","tit01 cDGray"),
			new NodeClassFilter(LinkTag.class),	// hyperlink filter
			new NodeClassFilter(HTag.class)		// bold/emphasis filter (project-defined tag class)
		});

	/** Hyperlink filter. */
	private NodeClassFilter linkFilter = new NodeClassFilter(LinkTag.class);

	/**
	 * Parses {@code content}, keeps only the regions matched by {@code filters},
	 * and extracts the link URLs found inside those regions.
	 *
	 * @param content raw page HTML; may be null or empty
	 * @param filters region filters selecting the portions of the page to keep
	 * @return a two-element list — index 0: URLs of all links in the kept regions,
	 *         index 1: URLs of links nested inside bold/HTag areas; an empty list
	 *         when the filters are missing, the content is empty, or parsing fails
	 */
	private List<Set<String>> parsePage(String content, NodeFilter filters[]){
		if (filters == null || filters.length < 1) {
			return Collections.emptyList();
		}
		if (content == null || content.length() < 1) {
			// httpUtils may return null on a failed fetch — TODO confirm contract
			return Collections.emptyList();
		}
		/** Filter matching the content regions supplied by the caller. */
		OrFilter headerOrfooterFilter = new OrFilter();
		headerOrfooterFilter.setPredicates(filters);
		Set<String> validUrls = new HashSet<String>();
		Set<String> boldUrls = new HashSet<String>();
		try {
			// Pass 1: collect the HTML of every region matched by the caller's filters.
			parser.setInputHTML(content);
			NodeList divs = parser.extractAllNodesThatMatch(headerOrfooterFilter);
			int size = (divs == null) ? 0 : divs.size();
			StringBuilder sb = new StringBuilder();
			for (int m = 0; m < size; m++) {
				sb.append(divs.elementAt(m).toHtml(true));
			}
			// Pass 2: within the kept regions, extract hyperlinks and bold areas.
			// extractAllNodesThatMatch recurses, so a LinkTag nested inside an HTag
			// is also returned on its own and lands in validUrls.
			parser.setInputHTML(sb.toString());
			NodeList validLinks = parser.extractAllNodesThatMatch(fontOrLinkFilter);
			int count = (validLinks == null) ? 0 : validLinks.size();
			for (int i = 0; i < count; i++) {
				Node node = validLinks.elementAt(i);
				if (node instanceof LinkTag) {
					validUrls.add(((LinkTag) node).getLink());
				} else {
					// HTag (bold) region: links inside it count as "bold" links.
					collectLinkUrls(node.getChildren(), boldUrls);
				}
			}
		} catch (ParserException e) {
			// Unparseable content is treated as "no links": swallow rather than
			// propagate so one bad page does not abort a crawl run.
			return Collections.emptyList();
		}
		List<Set<String>> links = new ArrayList<Set<String>>(2);
		links.add(validUrls);
		links.add(boldUrls);
		return links;
	}

	/**
	 * Recursively collects the URLs of every {@link LinkTag} under {@code nodes}
	 * into {@code out}.
	 */
	private void collectLinkUrls(NodeList nodes, Set<String> out) {
		if (nodes == null || nodes.size() < 1) {
			return;
		}
		for (int i = 0; i < nodes.size(); i++) {
			Node node = nodes.elementAt(i);
			if (node instanceof LinkTag) {
				out.add(((LinkTag) node).getLink());
			}
			collectLinkUrls(node.getChildren(), out);
		}
	}

	/**
	 * Fetches the page at {@code url} and derives per-field scores from its links.
	 *
	 * @param url     page to fetch
	 * @param filters region filters forwarded to {@link #parsePage}
	 * @return currently always an empty map: the scoring step is not yet
	 *         implemented. Returns empty (never null) so callers can iterate safely.
	 */
	public Map<String,Float> getFiledValueByUrl(String url, NodeFilter []filters){
		String content = httpUtils.getContentByUrl(url);
		// list(0) = valid link set, list(1) = bold link set
		List<Set<String>> links = parsePage(content, filters);
		if (links.size() != 2) {
			return Collections.emptyMap();
		}
		Set<String> validUrl = links.get(0);
		Set<String> boldUrl = links.get(1);
		// TODO(review): scoring logic (weighting validUrl/boldUrl into Float values)
		// was never implemented in the original; return an empty map instead of null.
		return Collections.emptyMap();
	}

	/**
	 * Debug helper: recursively prints the raw text of every node in the tree.
	 * Kept public for compatibility; intended for manual inspection only.
	 */
	public void handleChilds(NodeList nodes){
		if (nodes == null || nodes.size() < 1) return;
		for (int i = 0; i < nodes.size(); i++) {
			Node node = nodes.elementAt(i);
			handleChilds(node.getChildren());
			System.out.println(node.getText());
		}
	}

}
