package com.spider.silence.downloader;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.spider.silence.abstarct.downloader.AbstractCommonDownLoader;
import com.spider.silence.task.SinaSiChuanTask;
import com.spider.silence.task.WangYiXinWenTask;
import com.spider.silence.task.manager.CommonTaskManager;
import com.spider.silence.utils.DateUtils;
import com.spider.silence.utils.FileUtils;
import com.spider.silence.utils.MD5Utils;
import com.spider.silence.utils.PropertiesUtil;
import org.apache.log4j.Logger;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import us.codecraft.webmagic.Page;

import java.io.File;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Created by admin on 2017/7/7.
 * Downloader for NetEase News (网易新闻): pulls section feeds, extracts article
 * URLs into crawl tasks, and scrapes article content plus comment threads.
 */
public class WangYiXinWenDownloader extends AbstractCommonDownLoader {

    private Logger logger = Logger.getLogger(WangYiXinWenDownloader.class);

    // Raw-HTML snapshots produced by the most recent crawlItem() call, keyed by file name.
    // NOTE(review): this field is re-assigned per crawlItem() call; concurrent callers
    // sharing one downloader instance would clobber each other's snapshots — confirm usage.
    private Map<String,File> files = null;

    public Map<String, File> getFiles() {
        return files;
    }

    // Section feed endpoints. Each returns JSONP of the form: data_callback([ {...}, ... ])
    private final String[] section = {
            "http://temp.163.com/special/00804KVA/cm_guonei.js?callback=data_callback", //domestic
            "http://temp.163.com/special/00804KVA/cm_guoji.js?callback=data_callback", //international
            "http://temp.163.com/special/00804KVA/cm_shehui.js?callback=data_callback", //society
            "http://temp.163.com/special/00804KVA/cm_war.js?callback=data_callback",//military
            "http://temp.163.com/special/00804KVA/cm_hangkong.js?callback=data_callback&a=2"//aviation
    };

    /**
     * Fetches every section feed and enqueues one {@link WangYiXinWenTask} per article URL
     * found in the feed. Each section is retried until it succeeds or the configured
     * "WangYiXinWen.timeOut" window (milliseconds) elapses.
     */
    public void generateTask(){
        long timeout = Long.parseLong(PropertiesUtil.getVal("WangYiXinWen.timeOut"));
        for(int i = 0; i < section.length; i++) {
            long startTime = System.currentTimeMillis();
            // BUG FIX: the original cached "nowTime" once before the loop and never
            // refreshed it, so a section that kept failing spun here forever.
            // Re-read the clock on every attempt so the timeout actually fires.
            while (System.currentTimeMillis() - startTime < timeout) {
                try {
                    Page page = getHtml(section[i]);
                    if(page != null && page.getHtml() != null && page.getStatusCode() == 200) {
                        Document document = page.getHtml().getDocument();
                        String result = document.getElementsByTag("body").text();
                        // Feed bytes arrive tagged as ISO-8859-1 but are really UTF-8.
                        result = new String(result.getBytes("ISO-8859-1"), "UTF-8");
                        // Strip the JSONP wrapper: drop the leading "data_callback(" (14 chars)...
                        result = result.substring(14).trim();
                        // ...and ONLY the trailing ")". BUG FIX: the original used
                        // replace(")", ""), which also deleted every ')' occurring inside
                        // titles/URLs in the payload, corrupting the JSON content.
                        if (result.endsWith(")")) {
                            result = result.substring(0, result.length() - 1).trim();
                        }
                        JSONArray array = JSON.parseArray(result);
                        for (int j = 0; j < array.size(); j++) {
                            JSONObject object = (JSONObject)array.get(j);
                            String url = (String)object.get("docurl");
                            WangYiXinWenTask task = new WangYiXinWenTask(url);
                            CommonTaskManager.add("WangYiXinWen", task);
                        }
                        break; // section handled, move to the next one
                    }
                } catch (Exception e) {
                    logger.error("failed to fetch section feed: " + section[i], e);
                    try {
                        // Back off briefly instead of hot-spinning on persistent failures.
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
            }
        }
    }

    /**
     * Crawls one NetEase news article: title, publish time, body text, image URLs,
     * reply/read counters and the complete comment thread, and snapshots the article
     * HTML into a temp file exposed via {@link #getFiles()}.
     *
     * @param url article URL; assumed to end with "&lt;docid&gt;.html" — the doc id is cut
     *            out of it for the comment API (TODO confirm against task generator)
     * @return a JSONObject with a single "data" entry holding all extracted fields
     * @throws Exception if the article could not be crawled before the configured timeout
     */
    public JSONObject crawlItem(String url) throws Exception {
        JSONObject item = new JSONObject();
        JSONObject data;
        // Flags whether the whole article (including comments) was captured successfully.
        boolean success = false;
        long startTime = System.currentTimeMillis();
        long nowTime = startTime;
        long timeout = Long.parseLong(PropertiesUtil.getVal("WangYiXinWen.timeOut"));
        while (nowTime - startTime < timeout) {
            // Start from a clean object on every retry so a partial failure
            // cannot leave stale fields behind.
            data = new JSONObject();
            data.put("url", url);
            nowTime = System.currentTimeMillis();
            try {
                Page page = getHtml(url, true);
                // BUG FIX: also guard page.getHtml() itself — the original dereferenced it
                // (page.getHtml().getDocument()) before any null check, risking an NPE.
                if(null != page && page.getStatusCode() == 200
                        && null != page.getHtml() && null != page.getHtml().getDocument()) {
                    Document document = page.getHtml().getDocument();
                    String title = document.title();
                    data.put("title", title);
                    String publish_time = document.select("#epContentLeft div[class=\"post_time_source\"]").get(0).text().substring(0, 19).trim();
                    publish_time = DateUtils.strToFormatStr(publish_time);
                    data.put("publish_time", publish_time); // article publish time
                    data.put("spider_time", new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date())); // crawl time
                    Elements content_elements = document.select("#epContentLeft div[class=\"post_body\"] div[class=\"post_text\"] p");
                    List<String> img_urls = new ArrayList<>();
                    StringBuilder content = new StringBuilder();
                    // Paragraphs that embed an image contribute the image URL; the rest
                    // contribute their text to the article body.
                    for(int i = 0; i < content_elements.size(); i++) {
                        Element e = content_elements.get(i);
                        if(e.select("img").size() > 0) {
                            img_urls.add(e.select("img").get(0).attr("src"));
                        } else {
                            content.append(e.text());
                        }
                    }
                    data.put("img_urls", img_urls.toArray());
                    // BUG FIX: store a String — the original put the StringBuilder itself.
                    data.put("content", content.toString());
                    // Doc id = the URL's final path segment minus the ".html" suffix.
                    String newid = url.substring(url.lastIndexOf("/")+1, url.length()-5);
                    data.put("id", newid);
                    String count_url ="http://sdk.comment.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/" + newid + "?ibc=jssdk";
                    Page count_page = getHtml(count_url, true);
                    String count_string = count_page.getRawText().trim();
                    JSONObject count_object = JSON.parseObject(count_string);
                    int reply_count = (Integer) count_object.get("tcount");
                    data.put("reply_count", reply_count);
                    data.put("read_count", (Integer)count_object.get("cmtAgainst") + (Integer)count_object.get("cmtVote") + (Integer)count_object.get("rcount"));
                    JSONArray reply_array = new JSONArray();
                    // Total comment pages, 30 comments per page (ceiling division).
                    int total_page = (reply_count+29) / 30;
                    for(int p = 0; p < total_page; p++) {
                        String comment_url = "http://comment.news.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/"+ newid +"/comments/newList?offset="+ p*30 +"&limit=30";
                        // Retry each comment page up to 10 times; break out on first success.
                        for(int q = 0; q < 10; q++) {
                            try {
                                Page comment_page = getHtml(comment_url, true);
                                String comment_string = comment_page.getRawText().trim();
                                JSONObject comment_object = JSON.parseObject(comment_string);
                                JSONArray commentIds_array = (JSONArray) comment_object.get("commentIds");
                                JSONObject comments_object = (JSONObject) comment_object.get("comments");
                                if(!commentIds_array.isEmpty()) {
                                    for(int i = 0; i < commentIds_array.size(); i++) {
                                        JSONObject temp = new JSONObject();
                                        String comment_id = (String) commentIds_array.get(i);
                                        JSONObject comment_item = (JSONObject) comments_object.get(comment_id);
                                        temp.put("like_count", comment_item.get("vote"));
                                        temp.put("dislike_count", comment_item.get("against"));
                                        temp.put("content", comment_item.get("content"));
                                        temp.put("publish_user", ((JSONObject) comment_item.get("user")).get("nickname"));
                                        temp.put("publish_user_id", ((JSONObject) comment_item.get("user")).get("id"));
                                        temp.put("publish_time", comment_item.get("createTime"));
                                        temp.put("ancestor_id", newid);
                                        temp.put("publish_user_photo", ((JSONObject) comment_item.get("user")).get("avatar"));
                                        reply_array.add(temp);
                                    }
                                }
                                break;
                            } catch (Exception e) {
                                // Back off before the next attempt; an InterruptedException
                                // from sleep() propagates via this method's "throws Exception".
                                Thread.sleep(3000);
                            }
                        }
                    }
                    data.put("reply_nodes", reply_array);
                    // Snapshot file name: source_timestamp_md5(url-without-fragment).
                    String fileName = "WangYiXinWen" + "_" + DateUtils.dateToTimestamp( DateUtils.parseDate(publish_time)).getTime() + "_" + MD5Utils.getHash3(url.replaceAll("#.*", ""), "MD5");
                    // (The original appended .replace("-","-") here — a no-op, removed.)
                    String publishDate = DateUtils.formatDate(DateUtils.parseDate(publish_time));
                    String all_fileName = "网易新闻_speeches_" + publishDate + "_" + fileName;
                    // Persist the article page source (page 1) for archival.
                    File file = FileUtils.createTempFile(all_fileName + "_pageNo_1", document.html());
                    files = new ConcurrentHashMap<>();
                    files.put(all_fileName + "_pageNo_1", file);
                    item.put("data", data);
                    success = true;
                    break;
                }
            } catch (Exception e) {
                Thread.sleep(1500);
                logger.error("爬虫过程出现异常, 尝试重新爬取", e);
            }
        }
        if(success) {
            return item;
        } else {
            throw new Exception(url+"爬取失败");
        }
    }
}
