package com.morpx.sim.crawl;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URL;
import java.security.MessageDigest;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.google.gson.Gson;
import com.morpx.sim.apps.CrawlerThread;
import com.morpx.sim.storage.Item;

import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.parser.BinaryParseData;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;
import edu.uci.ics.crawler4j.url.WebURL;
import edu.uci.ics.crawler4j.util.IO;

/**
 * A generic crawler built on crawler4j that downloads binary content (e.g. images)
 * whose URL matches a configurable suffix pattern, mirroring the source site's
 * path structure under a local storage folder.
 *
 * <p>Configuration lives in static fields, so only one crawl configuration per
 * JVM is supported; {@link #configure} must be called before the crawl starts.
 *
 * @author tianli
 */
public class GenericCrawler extends WebCrawler {
  // Link suffixes we never want to follow (media, archives, scripts, ...).
  private static final Pattern LINK_FILTER = Pattern.compile(".*(\\.(css|js|mid|mp2|mp3|mp4|wav|avi|mov|mpeg|ram|m4v"
                  + "|rm|smil|wmv|swf|wma|zip|rar|gz))$");
  // URLs matching this pattern have their content downloaded; set by configure().
  private static Pattern downloadFilter;

  // Domain prefix stripped from URLs when computing the local storage path.
  private static String sourceDomain;
  // Local root folder where downloaded files are written.
  private static File storageFolder;

  /**
   * Configures the crawler's static state: the source domain, the download
   * filter, and the storage folder (created if it does not exist).
   * Must be called before {@code CrawlController.start}.
   *
   * @param srcDomain         site prefix removed from URLs when deriving local paths
   * @param downloadPattern   regex matched against full URLs; matching content is saved
   * @param storageFolderName local directory in which downloads are stored
   */
  public static void configure(String srcDomain,
                               String downloadPattern,
                               String storageFolderName) {
    storageFolder = new File(storageFolderName);
    // Create the folder if missing; warn on failure instead of failing silently.
    if (!storageFolder.exists() && !storageFolder.mkdirs()) {
      System.err.println("Warning: could not create storage folder: " + storageFolderName);
    }

    sourceDomain = srcDomain;
    downloadFilter = Pattern.compile(downloadPattern);
  }

  /**
   * Visits every page except those whose URL ends with a filtered suffix
   * (media, archives, scripts, ...), so the crawl follows ordinary page links.
   */
  @Override
  public boolean shouldVisit(WebURL url) {
    String href = url.getURL().toLowerCase();
    return !LINK_FILTER.matcher(href).matches();
  }

  /**
   * Stores binary page content (e.g. images) whose URL matches the configured
   * download filter, mirroring the URL's path under the storage folder.
   * Non-binary pages and non-matching URLs are skipped.
   */
  @Override
  public void visit(Page page) {
    String url = page.getWebURL().getURL();
    System.err.println("Visiting " + url);

    // Only binary content is saved; HTML pages are merely crawled for links.
    if (!(page.getParseData() instanceof BinaryParseData)) {
      return;
    }
    // Skip content that does not match the configured download filter.
    if (!downloadFilter.matcher(url).matches()) {
      return;
    }

    try {
      // Path of the resource relative to the source domain.
      String filePath = (new URI(url.replace(sourceDomain, ""))).getPath();
      int slash = filePath.lastIndexOf('/');
      String fileName = filePath.substring(slash + 1);
      // BUGFIX: slice at the last '/' instead of filePath.replace(fileName, ""),
      // which removed EVERY occurrence of the name (e.g. "/img/img.jpg" broke).
      String dirPath = filePath.substring(0, slash + 1);

      File saveToPath = new File(storageFolder, dirPath);
      if (!saveToPath.exists()) {
        saveToPath.mkdirs();
      }
      // Store the raw bytes under the mirrored path.
      IO.writeBytesToFile(page.getContentData(),
                          new File(saveToPath, fileName).getAbsolutePath());
      System.out.println("Stored: " + url);
    } catch (Exception e) {
      // Best effort: log and keep crawling the remaining pages.
      e.printStackTrace();
    }
  }

  /**
   * Entry point. Expects six arguments: source address, number of crawler
   * threads, storage folder, crawl-data root folder, file suffix pattern
   * (e.g. {@code jpg|png}), and maximum crawl depth.
   */
  public static void main(String[] args) throws Exception {
    if (args.length < 6) {
      // BUGFIX: the usage text previously listed only four of the six
      // required parameters; fileType and crawlingDepth were missing.
      System.out.println("Needed parameters: ");
      System.out.println("\t source address");
      System.out.println("\t numberOfCrawlers (number of concurrent threads)");
      System.out.println("\t storageFolder (a folder for storing downloaded images)");
      System.out.println("\t rootFolder (it will contain intermediate crawl data)");
      System.out.println("\t fileType (suffix pattern of files to download, e.g. jpg|png)");
      System.out.println("\t crawlingDepth (maximum depth of crawling)");
      return;
    }
    // Seed URL of the site to crawl.
    String srcAddress = args[0];
    // Number of concurrent crawler threads.
    int numberOfCrawlers = Integer.parseInt(args[1]);
    // Folder for the downloaded files.
    String storageFolder = args[2];
    // Folder for crawler4j's intermediate crawl data.
    String rootFolder = args[3];
    // Suffix pattern of files worth downloading.
    String fileType = args[4];
    // Maximum crawl depth.
    int crawlingDepth = Integer.parseInt(args[5]);

    CrawlConfig config = new CrawlConfig();
    config.setCrawlStorageFolder(rootFolder);
    config.setMaxDepthOfCrawling(crawlingDepth);

    // Images are binary content; include it so visit() receives the bytes.
    config.setIncludeBinaryContentInCrawling(true);

    PageFetcher pageFetcher = new PageFetcher(config);
    RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
    RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
    CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
    controller.addSeed(srcAddress);

    // BUGFIX: dropped the stray '?' after fileType, which made the suffix's
    // last character optional (e.g. "png" also matched URLs ending in ".pn").
    GenericCrawler.configure(srcAddress, ".*(\\.(" + fileType + "))$", storageFolder);

    controller.start(GenericCrawler.class, numberOfCrawlers);
  }

}
