package com.ndood.spider.tmall.web;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import com.ndood.core.render.SeleniumDownloader;
import com.ndood.core.render.impl.PhantomJsDriverPool;
import com.ndood.core.util.UrlStringUtil;
import com.ndood.spider.tmall.action.TmallAction;
import com.ndood.spider.tmall.bean.TmallGoods;
import com.ndood.spider.tmall.dao.TmallGoodsDao;
import com.ndood.spider.tmall.pipeline.TmallPipeline;
import com.ndood.spider.tmall.processor.TmallPageProcessor;

import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.selector.Html;
import us.codecraft.webmagic.selector.Selectable;

/**
 * Spider controller: crawls Tmall goods from locally saved shop listing pages
 * and schedules detail-page crawling via WebMagic.
 */
@RestController
@RequestMapping("tmall")
public class TmallSpiderController {

	@Autowired
	private TmallPipeline tmallPipeline;

	/**
	 * Completion flag polled by the long-running request handlers and set by
	 * {@link #stop()}. NOTE(review): shared across all requests, so a single
	 * /stop releases every waiting handler — confirm this is intended.
	 */
	private final AtomicBoolean isOk = new AtomicBoolean(false);

	@Autowired
	private TmallGoodsDao tmallGoodsDao;

	/** Directory holding the saved listing-page HTML files, one "<catPath>.txt" per category. */
	//private static final String CONFIG_DIR = "C:/Users/Administrator/Desktop/lala's peach/";
	private static final String CONFIG_DIR = "C:/Users/Administrator/Desktop/艾舒客旗舰店/";

	/**
	 * Crawls the goods linked from a locally saved static listing page.
	 *
	 * @param catPath category path; also the file name (without ".txt") of the
	 *                saved listing page under {@link #CONFIG_DIR}
	 * @return "ok" when the crawl finished (i.e. after /stop was called), or a
	 *         short error message when the page is missing or yields no items
	 * @throws Exception if interrupted while waiting for completion
	 */
	@RequestMapping("/spider")
	public String spider(String catPath) throws Exception {
		// Step1: read the saved static listing page
		String fileStr = readFile(CONFIG_DIR + catPath + ".txt");
		if (fileStr == null) {
			// Fix: previously fell through and threw an NPE inside new Html(null)
			return "listing file not found or unreadable: " + catPath;
		}

		// Step2: extract the goods detail-page urls from the listing markup
		Html html = new Html(fileStr);
		List<Selectable> items = html.xpath("//*[@id='J_ShopSearchResult']/div[@class='skin-box-bd']/div[@class='J_TItems']/div/dl[@class='item']").nodes();
		StringBuilder urlBuf = new StringBuilder();
		for (Selectable item : items) {
			String url = item.xpath("//*/dt/a/@href").get();
			if (url == null) {
				continue; // Fix: item without a link used to produce the literal url "https:null"
			}
			urlBuf.append("https:").append(url).append(',');
		}
		if (urlBuf.length() == 0) {
			// Fix: substring(0, lastIndexOf(",")) threw StringIndexOutOfBoundsException
			// when the page had no matching items
			return "no items found for: " + catPath;
		}
		urlBuf.setLength(urlBuf.length() - 1); // drop trailing comma
		String targetUrls = urlBuf.toString();

		// Step3: start the spider and crawl each goods page in turn
		Spider spider = Spider.create(new TmallPageProcessor());

		String cat = extractCat(catPath);

		for (String stringUrl : targetUrls.split(",")) {
			String id = UrlStringUtil.getParamsMap(stringUrl).get("id");
			TmallGoods goods = new TmallGoods();
			goods.setId(id);
			goods.setCat(cat);
			goods.setCatPath(catPath);
			goods.setUrl(stringUrl);
			try {
				TmallGoods temp = tmallGoodsDao.get(id);
				if (temp == null) {
					tmallGoodsDao.insert(goods);
					// Only brand-new goods need a detail-page crawl
					spider.addUrl(stringUrl);
				} else {
					// Skip when the goods already carries this category (guards
					// against duplicate runs). Fix: null-safe — previously NPE'd
					// when the stored cat/catPath columns were null.
					String oldCat = temp.getCat() == null ? "" : temp.getCat();
					String oldCatPath = temp.getCatPath() == null ? "" : temp.getCatPath();
					if (oldCat.contains(cat.trim()) || oldCatPath.contains(catPath.trim())) {
						continue;
					}
					// Existing goods: just append the new category
					temp.setCat(oldCat + "," + cat);
					temp.setCatPath(oldCatPath + "," + catPath);
					tmallGoodsDao.update(temp);
				}
			} catch (Exception e) {
				// Best-effort: a failure on one goods must not abort the whole batch
				e.printStackTrace();
			}
		}

		spider.addPipeline(tmallPipeline);
		spider.setDownloader(new SeleniumDownloader(5000, new PhantomJsDriverPool(), new TmallAction()));
		spider.thread(3);
		spider.setExitWhenComplete(true);
		spider.start();
		waitUntilStopped();
		return "ok";
	}

	/**
	 * Re-crawls goods under the given category path whose name or price is
	 * still missing (i.e. a previous detail-page crawl did not complete).
	 *
	 * @param catPath category path filter (substring match, delegated to the DAO)
	 * @return "ok" when finished, or a message when nothing needs re-crawling
	 * @throws Exception if interrupted while waiting for completion
	 */
	@RequestMapping("/modify")
	public String modify(String catPath) throws Exception {
		// Step1: collect the urls of goods that still lack a name or a price
		List<TmallGoods> goods = tmallGoodsDao.getListByCatPathContains(catPath);
		StringBuilder urlBuf = new StringBuilder();
		for (TmallGoods tmallGoods : goods) {
			if (tmallGoods.getName() == null || tmallGoods.getPrice() == null) {
				urlBuf.append(tmallGoods.getUrl()).append(',');
			}
		}
		if (urlBuf.length() == 0) {
			// Fix: substring(0, lastIndexOf(",")) threw when nothing matched
			return "nothing to re-crawl for: " + catPath;
		}
		urlBuf.setLength(urlBuf.length() - 1); // drop trailing comma

		// Step2: crawl the collected goods pages
		Spider spider = Spider.create(new TmallPageProcessor());
		spider.addUrl(urlBuf.toString().split(","));
		spider.addPipeline(tmallPipeline);
		spider.setDownloader(new SeleniumDownloader(5000, new PhantomJsDriverPool(), new TmallAction()));
		spider.thread(3);
		spider.setExitWhenComplete(true);
		spider.start();
		waitUntilStopped();
		return "ok";
	}

	/**
	 * Derives the category name from a category path such as "shop-cat1":
	 * the part after the first "-", minus one trailing digit (a page-number
	 * suffix) when present. Returns the path unchanged when it has no "-".
	 * Fix: guards against "x-" (previously ArrayIndexOutOfBoundsException)
	 * and an empty middle part (previously charAt(-1)).
	 */
	private static String extractCat(String catPath) {
		if (!catPath.contains("-")) {
			return catPath;
		}
		String[] parts = catPath.split("-");
		if (parts.length < 2 || parts[1].isEmpty()) {
			return catPath;
		}
		String cat = parts[1];
		if (Character.isDigit(cat.charAt(cat.length() - 1))) {
			cat = cat.substring(0, cat.length() - 1);
		}
		return cat;
	}

	/**
	 * Blocks until {@link #stop()} sets the flag, then resets it so the next
	 * crawl can wait again.
	 *
	 * @throws InterruptedException if the polling sleep is interrupted
	 */
	private void waitUntilStopped() throws InterruptedException {
		while (!isOk.get()) {
			Thread.sleep(1000);
		}
		isOk.set(false);
	}

	/**
	 * Reads the whole content of a saved static HTML file, concatenating its
	 * lines without separators (line breaks are irrelevant to the HTML parser).
	 *
	 * @param fileName absolute path of the file
	 * @return the file content, or {@code null} when the file is missing or unreadable
	 */
	private String readFile(String fileName) {
		File myFile = new File(fileName);
		if (!myFile.exists()) {
			System.err.println("Can't Find " + fileName);
			return null; // Fix: previously fell through into the read and hit FileNotFoundException
		}
		// NOTE(review): FileReader uses the platform default charset; the saved
		// pages contain Chinese text — confirm the files are saved in that charset.
		try (BufferedReader in = new BufferedReader(new FileReader(myFile))) { // Fix: reader was leaked on read errors
			StringBuilder sb = new StringBuilder();
			String str;
			while ((str = in.readLine()) != null) {
				sb.append(str);
			}
			return sb.toString();
		} catch (IOException e) {
			e.printStackTrace(); // Fix: was e.getStackTrace(), which silently discarded the error
			return null;
		}
	}

	/**
	 * Releases whichever handler is currently waiting in spider()/modify().
	 *
	 * @return "stoped" (kept byte-identical for existing callers; note the typo)
	 */
	@RequestMapping("/stop")
	public String stop() {
		isOk.set(true);
		return "stoped";
	}
}
