/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package webCrawler;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Iterator;
import java.util.Set;
import java.util.regex.Pattern;

import org.apache.http.Header;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;
import pojo.web.WebContent;
import service.web.WebContentService;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.beans.factory.annotation.Autowired;


/**
 * Crawler for NetEase (163.com) tech/mobile news pages.
 *
 * <p>{@link #shouldVisit(Page, WebURL)} restricts the frontier to the allowed
 * URL prefixes / suffixes, and {@link #visit(Page)} extracts the article
 * title, publish date, source and body with Jsoup and stores each body block
 * through the injected {@link WebContentService}.
 */
public class BasicCrawler extends WebCrawler {

  /** Matches common image file extensions; such URLs are never crawled. */
  private static final Pattern IMAGE_EXTENSIONS = Pattern.compile(".*\\.(bmp|gif|jpg|png)$");

  /** URLs containing any of these fragments are skipped in {@link #visit(Page)}. */
  private static final String[] URL_FILTER_WORDS = {"zajia", "caozhi", "renjian", "photoview", "data"};

  /**
   * Shared date formatter for article publish dates.
   *
   * <p>NOTE: {@link SimpleDateFormat} is NOT thread-safe and crawler4j runs
   * several crawler threads concurrently, so every use of this instance must
   * be wrapped in {@code synchronized (sdf)}. The field stays
   * {@code protected static} for backward compatibility with subclasses.
   */
  protected static SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

  /** Only URLs starting with one of these prefixes are followed. */
  private final String[] allowUrls = {"http://tech.163", "http://mobile.163"};

  /** URLs ending with one of these suffixes are also followed. */
  private final String[] subUrls = {".html"};

  /** Persistence service; when null, crawled content is logged but not stored. */
  private WebContentService wcs;

  /**
   * Injects the persistence service used to store crawled articles.
   *
   * @param wcs the wcs to set
   */
  public void setService(WebContentService wcs) {
    this.wcs = wcs;
  }

  /**
   * Decides whether the given URL should be crawled: images are rejected,
   * then any URL matching an allowed prefix or suffix is accepted.
   */
  @Override
  public boolean shouldVisit(Page referringPage, WebURL url) {
    String href = url.getURL().toLowerCase();
    // Never fetch plain image resources.
    if (IMAGE_EXTENSIONS.matcher(href).matches()) {
      return false;
    }
    for (String prefix : allowUrls) {
      if (href.startsWith(prefix)) {
        return true;
      }
    }
    for (String suffix : subUrls) {
      if (href.endsWith(suffix)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Called when a page has been fetched and parsed. Filters out non-article
   * URLs, then extracts and stores the article content.
   */
  @Override
  public void visit(Page page) {
    int docid = page.getWebURL().getDocid();
    String url = page.getWebURL().getURL();

    // Skip URLs containing filtered keywords (photo galleries, data pages, ...).
    for (String word : URL_FILTER_WORDS) {
      if (url.contains(word)) {
        logger.info("跳过：{}", url);
        return;
      }
    }
    // Only article pages (URLs ending in .html) are processed.
    if (!url.endsWith(".html")) {
      logger.info("跳过：{}", url);
      return;
    }

    String domain = page.getWebURL().getDomain();
    String path = page.getWebURL().getPath();
    String subDomain = page.getWebURL().getSubDomain();
    String parentUrl = page.getWebURL().getParentUrl();
    String anchor = page.getWebURL().getAnchor();

    logger.debug("Docid: {}", docid);
    logger.info("URL: {}", url);
    logger.debug("Domain: '{}'", domain);
    logger.debug("Sub-domain: '{}'", subDomain);
    logger.debug("Path: '{}'", path);
    logger.debug("Parent page: {}", parentUrl);
    logger.debug("Anchor text: {}", anchor);

    if (page.getParseData() instanceof HtmlParseData) {
      HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
      String text = htmlParseData.getText();
      String html = htmlParseData.getHtml();

      extractAndStore(url, html);

      Set<WebURL> links = htmlParseData.getOutgoingUrls();
      logger.debug("Text length: {}", text.length());
      logger.debug("Html length: {}", html.length());
      logger.debug("Number of outgoing links: {}", links.size());
    }

    Header[] responseHeaders = page.getFetchResponseHeaders();
    if (responseHeaders != null) {
      logger.debug("Response headers:");
      for (Header header : responseHeaders) {
        logger.debug("\t{}: {}", header.getName(), header.getValue());
      }
    }
    logger.debug("=============");
  }

  /**
   * Extracts title, publish date, source and article body from the page HTML
   * and persists one {@link WebContent} per body block.
   *
   * @param url  the page URL, stored with each content block
   * @param html the raw page HTML
   */
  private void extractAndStore(String url, String html) {
    Document doc = Jsoup.parse(html);

    // Bug fix: pages without an <h1> used to throw NullPointerException here;
    // fall back to the document <title> instead.
    Element titleElement = doc.select("h1").first();
    String title = (titleElement != null) ? titleElement.text() : doc.title();

    // Expected layout: "yyyy-MM-dd HH:mm  来源:  <source>" — index 0 is the
    // date, index 2 the source. Bug fix: guard the indices instead of
    // throwing ArrayIndexOutOfBoundsException when the element is missing.
    String timeSource = doc.getElementsByClass("post_time_source").text();
    String[] parts = timeSource.split("\\s+");
    String date = parts.length > 0 ? parts[0] : "";
    String source = parts.length > 2 ? parts[2] : "";

    for (Element bodyBlock : doc.getElementsByClass("post_text")) {
      String newsContent = bodyBlock.text();
      if (newsContent == null) {
        continue;
      }
      WebContent c = new WebContent();
      c.setUrl(url);
      c.setTitle(title);
      try {
        // SimpleDateFormat is not thread-safe; crawler4j runs multiple
        // crawler threads, so serialize access to the shared instance.
        synchronized (sdf) {
          c.setDate(sdf.parse(date + " 00:00:00"));
        }
      } catch (ParseException e) {
        // Leave the date unset rather than aborting the whole article.
        logger.warn("Unparseable publish date '{}' on {}", date, url);
      }
      c.setSource(source);
      c.setContent(newsContent);

      c.printMember(c);
      if (wcs != null) {
        wcs.insertWebContent(c);
      } else {
        logger.warn("DataBase service empty");
      }
    }
  }
}
