package com.spider.silence.downloader;

import com.alibaba.fastjson.JSONObject;
import com.spider.silence.abstarct.downloader.AbstractCommonDownLoader;
import com.spider.silence.task.ChengDuWanBaoTask;
import com.spider.silence.task.SiChuanRenShiTask;
import com.spider.silence.task.manager.CommonTaskManager;
import com.spider.silence.utils.DateUtils;
import com.spider.silence.utils.FileUtils;
import com.spider.silence.utils.MD5Utils;
import com.spider.silence.utils.PropertiesUtil;
import org.apache.log4j.Logger;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import us.codecraft.webmagic.Page;

import java.io.File;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Created by admin on 2017/7/5.
 * Downloader for the Sichuan Personnel Examination site (四川人事考试网, www.scpta.gov.cn):
 * discovers news links on the section page and extracts title, publish time,
 * images and body text from each article page.
 */
public class SiChuanRenShiDownloader extends AbstractCommonDownLoader {

    private final Logger logger = Logger.getLogger(SiChuanRenShiDownloader.class);

    // Page-name -> temp HTML file produced by the most recent crawlItem() call.
    // NOTE(review): this field is replaced on every crawlItem() call; presumably
    // each downloader instance handles one item at a time — confirm against callers.
    private Map<String, File> files = null;

    public Map<String, File> getFiles() {
        return files;
    }

    /** Section page that lists the news items to crawl. */
    public final String section = "http://www.scpta.gov.cn/news/news.aspx";


    /**
     * Generates crawl tasks: fetches the section page, extracts every news link
     * and queues one {@link SiChuanRenShiTask} per link under the
     * "SiChuanRenShi" key. Retries indefinitely on failure, pausing 3 seconds
     * between attempts; stops retrying if the thread is interrupted.
     */
    public void generateTask() {
        while (true) {
            try {
                Page page = getHtml(section, true);
                if (page != null && page.getHtml() != null && page.getStatusCode() == 200) {
                    Document document = page.getHtml().getDocument();
                    Elements elements = document.select("div[class=\"allnews_btc\"] > ul > li");
                    for (Element element : elements) {
                        String url = element.select("a").attr("href");
                        CommonTaskManager.add("SiChuanRenShi", new SiChuanRenShiTask(url));
                    }
                    break;
                }
            } catch (Exception e) {
                // Log through the class logger instead of printStackTrace().
                logger.warn("failed to generate SiChuanRenShi tasks, retrying", e);
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException interrupted) {
                    // Restore the interrupt flag and stop retrying so the
                    // interruption is observable by the caller.
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }
    }


    /**
     * Crawls a single news-detail page and extracts its title, publish time,
     * image URLs and body text, also persisting the raw HTML to a temp file
     * registered in {@link #getFiles()}. Retries until success or until the
     * configured "SiChuanRenShi.timeOut" (milliseconds) elapses.
     *
     * @param url the news-detail page URL
     * @return a JSONObject whose "data" entry holds the extracted fields
     * @throws Exception if the page could not be crawled before the timeout
     */
    public JSONObject crawlItem(String url) throws Exception {
        JSONObject item = new JSONObject();
        JSONObject data;
        // Marks whether the crawl completed successfully.
        boolean success = false;
        // Parse the timeout once instead of on every loop iteration.
        long timeoutMs = Long.parseLong(PropertiesUtil.getVal("SiChuanRenShi.timeOut"));
        long startTime = System.currentTimeMillis();
        long nowTime = startTime;
        while (nowTime - startTime < timeoutMs) {
            // Rebuild the payload on every attempt so a failed pass leaves no stale data.
            data = new JSONObject();
            data.put("url", url);
            nowTime = System.currentTimeMillis();
            try {
                Page page = getHtml(url, true);
                if (page != null && page.getStatusCode() == 200) {
                    Document document = page.getHtml().getDocument();

                    // Title: the last segment of the breadcrumb trail.
                    String title = document.select("div[class=\"weizhi\"] > span").get(0).text();
                    title = title.substring(title.lastIndexOf(">") + 1).trim();
                    data.put("title", title);

                    // Publish time: text after the 5-char "发布时间：" marker,
                    // expected in yyyy/M/d form; normalized to yyyy-MM-dd 00:00:00.
                    String publish_time = document.select("div[class=\"content\"] > div").get(0).text().trim();
                    publish_time = publish_time.substring(publish_time.lastIndexOf("发布时间：") + 5)
                            .replace(" ", "").replace(" ", "").trim();
                    String[] publish_time_array = publish_time.split("/");
                    publish_time = publish_time_array[0]
                            + "-" + String.format("%02d", Integer.parseInt(publish_time_array[1]))
                            + "-" + String.format("%02d", Integer.parseInt(publish_time_array[2]))
                            + " 00:00:00";
                    publish_time = DateUtils.strToFormatStr(publish_time);
                    data.put("publish_time", publish_time); // article publish time
                    data.put("spider_time", new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date())); // crawl time

                    // Body: collect image URLs and concatenate paragraph text.
                    Elements main = document.select("div[class=\"content\"] p");
                    if (!main.isEmpty()) {
                        List<String> img_urls = new ArrayList<>();
                        StringBuilder content = new StringBuilder();
                        for (Element paragraph : main) {
                            Elements imgs = paragraph.select("img");
                            if (!imgs.isEmpty()) {
                                img_urls.add(imgs.get(0).attr("src"));
                            } else {
                                content.append(paragraph.text().trim());
                            }
                        }
                        data.put("img_urls", img_urls.toArray());
                        // BUGFIX: store the String, not the StringBuilder, so JSON
                        // serialization does not depend on fastjson's handling of
                        // arbitrary objects.
                        data.put("content", content.toString());
                    }

                    // Deterministic file name: source_timestamp_md5(url-without-fragment).
                    String fileName = "SiChuanRenShi" + "_"
                            + DateUtils.dateToTimestamp(DateUtils.parseDate(publish_time)).getTime()
                            + "_" + MD5Utils.getHash3(url.replaceAll("#.*", ""), "MD5");
                    String publishDate = DateUtils.formatDate(DateUtils.parseDate(publish_time));
                    String all_fileName = "四川人事网_speeches_" + publishDate + "_" + fileName;
                    // Persist the raw HTML of the (single) page.
                    File file = FileUtils.createTempFile(all_fileName + "_pageNo_1", document.html());
                    files = new ConcurrentHashMap<>();
                    files.put(all_fileName + "_pageNo_1", file);

                    item.put("data", data);
                    success = true;
                    break;
                }
            } catch (Exception e) {
                // Wait 1.5 seconds, then retry. (An InterruptedException here
                // propagates out through the declared "throws Exception".)
                Thread.sleep(1500);
                logger.info(e);
                System.err.println("爬虫过程出现异常, 尝试重新爬取");
            }
        }
        if (success) {
            return item;
        }
        throw new Exception(url + "爬取失败");
    }




}
