package com.kinth.work.webmagic.huicong;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.pipeline.FilePipeline;
import us.codecraft.webmagic.processor.PageProcessor;

/**
 * <p>
 * Title: pagination handler
 * </p>
 * <p>
 * Description: http://s.hc360.com/?w=%CE%E5%BD%F0&mychannel=enterprise is the page the
 * search redirects to. To handle pagination, the total page count must be extracted
 * first; this class does that: it opens each link collected in step 1 and derives the
 * page count from the item total. Note that, per the site's JavaScript, pagination is
 * capped at 100 pages — the item total may imply more, but the site only serves up to
 * page 100, so only 100 pages can be fetched.
 * </p>
 *
 * @author kaikai
 * @2017年3月20日
 */
public class GetBigUrl2 implements PageProcessor, Runnable
{
	/** Number of result items the site shows per page; used to derive the page count. */
	private static final int ITEMS_PER_PAGE = 40;

	/** Hard cap imposed by the site's JavaScript: at most 100 result pages are served. */
	private static final int MAX_PAGES = 100;

	/** Number of URLs fetched from the database per batch. */
	public static final int NUM_ONCE = 10;

	// Starting URL, shared across instances.
	private static String startUrl;

	public static String getStartUrl()
	{
		return startUrl;
	}

	public static void setStartUrl(String startUrl)
	{
		GetBigUrl2.startUrl = startUrl;
	}

	private final Site site = Site.me()
			.setUserAgent("Mozilla/5.0 (X11; U; Linux i686; zh-CN; rv:1.9.1.2) " + "Gecko/20090803").setRetryTimes(3)
			.setSleepTime(100);

	/**
	 * Extracts the total item count from the result page's breadcrumb, derives the
	 * number of result pages (capped at {@link #MAX_PAGES}), and stores one URL per
	 * page into the "huicong_url2" table, carrying over the industry category that
	 * was stored alongside the URL in "huicong_url1".
	 *
	 * @param page the downloaded search-result page
	 */
	@Override
	public void process(Page page)
	{
		// Total item count shown in the breadcrumb area of the result page.
		String pageAmount = page.getHtml()
				.xpath("/html/body/div[@class='s-layout']" + "/div[@class='s-mod-bcrumbs']/span/em/text()").toString();

		if (pageAmount == null)
		{
			return;
		}

		int totalItems;
		try
		{
			totalItems = Integer.parseInt(pageAmount.trim());
		} catch (NumberFormatException e)
		{
			// The breadcrumb did not contain a plain number; skip this page
			// rather than letting the exception kill the spider thread.
			System.out.println("GetBigUrl2: cannot parse item total: " + pageAmount);
			return;
		}

		// Ceiling division. The original "totalItems / 40 + 1" over-counted by one
		// page whenever totalItems was an exact multiple of 40.
		int pageCount = (totalItems + ITEMS_PER_PAGE - 1) / ITEMS_PER_PAGE;
		if (pageCount > MAX_PAGES)
		{
			pageCount = MAX_PAGES;
		}

		String urlNow = page.getUrl().toString();

		// Look up the industry category previously stored for this URL.
		ArrayList<String> industryList = dbUtil.queryIndustry("huicong_url1", urlNow);
		String industry = industryList.isEmpty() ? null : industryList.get(0);

		// Page 1 is the URL itself.
		dbUtil.insertUrl(new UrlHuiCong(urlNow, industry), "huicong_url2");

		// Pages 2..pageCount are addressed via the "&ee=<n>" query parameter.
		for (int i = 2; i <= pageCount; i++)
		{
			dbUtil.insertUrl(new UrlHuiCong(urlNow + "&ee=" + i, industry), "huicong_url2");
		}
	}

	@Override
	public Site getSite()
	{
		return site;
	}

	/**
	 * Drives the whole crawl: repeatedly pulls batches of {@link #NUM_ONCE} category
	 * URLs from the "huicong_url1" table (resuming from the rank recorded on disk by
	 * FileUtil), crawls each one, and advances the progress record. Loops forever,
	 * polling every ten seconds when the table is exhausted; returns only when the
	 * thread is interrupted.
	 */
	public static void runSpider()
	{
		System.out.println("爬取公司列表链接开始");
		// Resume position persisted by a previous run.
		int rank = FileUtil.getBigUrl();
		ArrayList<UrlHuiCong> urlList = dbUtil.queryUrl("huicong_url1", rank, NUM_ONCE);
		GetBigUrl2 getBigUrl2 = new GetBigUrl2();
		Spider spider = Spider.create(getBigUrl2);

		while (true)
		{
			if (urlList.isEmpty())
			{
				try
				{
					Thread.sleep(1000 * 10);
					System.out.println("GetBigUrl2: 当下分类列表已经爬取完毕。已经爬取完毕。如果公司列表和公司都爬取完毕。请关闭程序。");
				} catch (InterruptedException e)
				{
					// Preserve the interrupt status and stop the loop instead of
					// swallowing the interruption.
					Thread.currentThread().interrupt();
					return;
				}
			} else
			{
				for (UrlHuiCong urlHuiCong : urlList)
				{
					// One blocking crawl per URL; run() returns once the page
					// has been processed.
					spider.addUrl(urlHuiCong.getUrl()).run();
				}
			}

			// Advance the on-disk progress marker past the batch just handled,
			// then fetch the next batch from that position.
			int nextRank = FileUtil.getBigUrl() + urlList.size();
			FileUtil.writeBigUrl(nextRank);
			urlList = dbUtil.queryUrl("huicong_url1", nextRank, NUM_ONCE);
		}
	}

	public static void main(String[] args)
	{
		GetBigUrl2.runSpider();
	}

	@Override
	public void run()
	{
		GetBigUrl2.runSpider();
	}
}