package com.leaderment.timatt.controller;

import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Properties;
import java.util.concurrent.LinkedBlockingQueue;

import org.apache.log4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;

import com.leaderment.timatt.service.ICrawlerService;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import us.codecraft.webmagic.Spider;

@Api(description = "initUpsData",tags = "InitUpsData",basePath = "/initUpsData")
@RestController
@RequestMapping("/initUpsData")
public class InitUpsData {

	/** Class logger. */
	public static Logger log = Logger.getLogger(InitUpsData.class);
	/** Key/value pairs loaded from config.properties on each call to iniTracking(). */
	public static Properties prop = new Properties();
	// Registry mapping a type id to its Spider instance.
	public static HashMap<Integer,Spider> spiderMap = new HashMap<>();
	// Name of the Redis list that holds crawler URLs.
	public static String crawlerURLListName = "amzTrackingDomainList";

	public static String upsTrackingList = "upsTrackingList";
	// Number of crawler threads, read from the "threadCount" property.
	Integer threadCount;

	@Autowired
	ICrawlerService crawlerService;

	/**
	 * Manually initializes UPS tracking data: loads crawler settings from
	 * config.properties and starts the crawler on a background thread so the
	 * HTTP request returns immediately.
	 *
	 * @return "success" when the crawler thread was started, otherwise a
	 *         short error description
	 */
	@ApiOperation(value = "人工初始化tracking信息")
	@RequestMapping(value = "/iniTracking", method = RequestMethod.GET)
	@ResponseBody
	public String iniTracking() {
		// Load config.properties from the classpath; try-with-resources
		// guarantees the stream is closed (the original leaked it).
		try (InputStream in = this.getClass().getClassLoader()
				.getResourceAsStream("config.properties")) {
			if (in == null) {
				// getResourceAsStream returns null when the file is absent;
				// without this check prop.load(in) would throw an NPE.
				log.error("config.properties not found on classpath");
				return "error: config.properties not found";
			}
			prop.load(in);
		} catch (IOException e) {
			log.error("failed to load config.properties", e);
			return "error: failed to load config.properties";
		}
		// Number of crawler threads; guard against a missing/malformed property,
		// which previously caused an unhandled NumberFormatException/NPE.
		try {
			threadCount = Integer.parseInt(prop.getProperty("threadCount"));
		} catch (NumberFormatException e) {
			log.error("invalid or missing 'threadCount' in config.properties", e);
			return "error: invalid threadCount";
		}
		// Run the crawler asynchronously; the service call may block for a long time.
		Thread th = new Thread(new ThreadImpl());
		th.start();
		return "success";
	}

	/** Runnable that delegates to the injected crawler service; started by iniTracking(). */
	private class ThreadImpl implements Runnable {
		@Override
		public void run() {
			crawlerService.startCrawler();
		}
	}

}
