package com.sentiment.crawler;

import java.io.File;
import java.io.IOException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.log4j.Logger;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.sentiment.config.Config;
import com.sentiment.db.CrawlingPageDB;
import com.sentiment.db.PatentInfoDB;
import com.sentiment.obj.CrawlingPage;
import com.sentiment.obj.PatentInfo;
import com.sentiment.webcollector.util.CharsetDetector;
import com.sentiment.webcollector.util.FileUtils;

/**
 * Crawls patent search results from patentool.wanfangdata.com.cn for a given
 * keyword, saves each result page as an HTML file under
 * {@code Config.htmlSavingPath}, parses the patent details out of the saved
 * file, and persists the results via {@link PatentInfoDB} /
 * {@link CrawlingPageDB}.
 */
public class WanFangCrawler {

	// Use the class literal instead of allocating a throwaway instance
	// (the original did `new WanFangCrawler().getClass()`).
	private static final Logger logger = Config.getLogger(WanFangCrawler.class);

	// Patents parsed during the current search; cleared at the start of each
	// getSearch() call so repeated searches do not accumulate duplicates.
	private final List<PatentInfo> patentInfos = new ArrayList<PatentInfo>();
	// Crawled-page records to persist. NOTE(review): nothing in this class
	// ever adds to this list, so CrawlingPageDB ends up cleaned but empty —
	// confirm whether a caller is expected to populate it.
	private final List<CrawlingPage> pages = new ArrayList<CrawlingPage>();

	// Monotonically increasing counter used to build unique saved-page names.
	private final AtomicInteger id = new AtomicInteger(0);

	/**
	 * Connects to the given URL and fetches the page content.
	 *
	 * @param url page URL to fetch
	 * @return the parsed page as a jsoup {@link Document}
	 * @throws IOException if the connection fails or times out
	 */
	public static Document Connect(String url) throws IOException {
		logger.info("Connect url " + url);
		Connection connection = Jsoup.connect(url);
		connection.timeout(5000); // connection/read timeout in milliseconds
		// A User-Agent header could be set here if the server rejects
		// default Java clients:
		// connection.header("User-Agent", "Mozilla/4.0 (compatible; MSIE 5.0; Windows XP; DigExt)");
		return connection.get();
	}

	/**
	 * Searches for the given keyword, saves every result page under
	 * {@code Config.htmlSavingPath}, extracts the patent details from each
	 * page, and persists everything to the database once at the end.
	 *
	 * @param keyword search keyword (URL-encoded before use)
	 */
	public void getSearch(String keyword) {
		patentInfos.clear(); // fresh result set for this search
		try {
			String url = "http://patentool.wanfangdata.com.cn/Patent/Search?Query="
					+ URLEncoder.encode(keyword, "UTF-8");
			Document doc = Connect(url);

			// Each .IpctList element holds one search result.
			Elements links = doc.select(".IpctList");
			for (Element e : links) {
				Element item = e.select("li").first();
				Element link = item.select(".sePatentname").first().select("a").first();
				String href = link.attr("href");
				String patentTypeGreen = item.select(".sePatentType").select(".sePatentTypegreen").text();

				String detail = "http://patentool.wanfangdata.com.cn/" + href;
				Document article = Connect(detail);

				// Write with an explicit, fixed charset so the later read-back
				// is deterministic. The original used the platform-default
				// getBytes() and then guessed the encoding back with
				// CharsetDetector, which could misdetect.
				byte[] content = article.html().getBytes(StandardCharsets.UTF_8);
				String html = "page-" + id.getAndIncrement() + ".html";
				try {
					FileUtils.writeFileWithParent(Config.htmlSavingPath + html, content);
					logger.info("Save Page: " + html);
				} catch (IOException ex) {
					logger.error("Failed to save page " + html, ex);
				}
				logger.info("PatentTypegreen: " + patentTypeGreen);
				GetPageInfo(html, StandardCharsets.UTF_8.name(), href, patentTypeGreen);
			}
		} catch (Exception e) {
			// Log through the logger (with cause) instead of printStackTrace.
			logger.error("Crawling failed for keyword: " + keyword, e);
		}
		// Persist once after the crawl; the original cleaned and rewrote the
		// patent table on every page (O(n^2) DB writes) with the same final state.
		persistResults();
		logger.info("WanFangCrawlerConnection  finished");
	}

	/**
	 * Parses a previously saved result page and appends the extracted
	 * {@link PatentInfo} to {@link #patentInfos}.
	 *
	 * @param FileName        saved HTML file name (relative to Config.htmlSavingPath)
	 * @param encode          charset the file was written with
	 * @param url             source href of the page (kept for signature
	 *                        compatibility; currently unused)
	 * @param PatentTypegreen patent type label scraped from the result list
	 */
	private void GetPageInfo(String FileName, String encode, String url, String PatentTypegreen) {
		String patentAbstract = "";   // row 5 of the detail table
		String principalClaim = "";   // row 4 of the detail table
		StringBuilder patentInfo = new StringBuilder();
		File fl = new File(Config.htmlSavingPath + FileName);
		try {
			Element article = Jsoup.parse(fl, encode);
			// The first <table> holds the patent detail rows.
			Element table = article.select("table").first();
			int row = 0;
			for (Element e : table.select("tr")) {
				row++;
				if (row <= 3) {
					// Rows 1-3: general info fields, joined with "--".
					patentInfo.append("--").append(e.select("td").last().text());
				} else if (row == 4) {
					principalClaim = e.select("td").last().text();
				} else if (row == 5) {
					patentAbstract = e.select("td").last().text();
				} else {
					// Remaining rows: classification code and holder,
					// recorded as "null" when absent.
					String cdpclc = e.select(".CDPCLC").text();
					String cdpHolder = e.select(".CDPHolder").text();
					patentInfo.append("--").append(cdpclc.isEmpty() ? "null" : cdpclc);
					patentInfo.append("--").append(cdpHolder.isEmpty() ? "null" : cdpHolder);
				}
			}
			patentInfos.add(new PatentInfo(patentInfo.toString(), patentAbstract,
					principalClaim, PatentTypegreen));
		} catch (Exception e) {
			logger.error("Failed to parse saved page " + FileName, e);
		}
	}

	/**
	 * Rewrites both database tables from the in-memory result lists, closing
	 * each DB handle even when an insert fails.
	 */
	private void persistResults() {
		CrawlingPageDB cpdb = new CrawlingPageDB();
		try {
			cpdb.cleanAll();
			for (CrawlingPage cp : pages) {
				// Store page encoding, URL, and file name.
				cpdb.insertData(cp);
			}
		} finally {
			cpdb.DBClose();
		}

		PatentInfoDB patentDb = new PatentInfoDB();
		try {
			patentDb.cleanAll();
			for (PatentInfo info : patentInfos) {
				patentDb.saveData(info);
			}
		} finally {
			patentDb.DBClose();
		}
	}

	public static void main(String[] args) {
		WanFangCrawler crawler = new WanFangCrawler();
		crawler.getSearch("佘堃");
	}
}
