package cn.net.withub.dataCollector.web.collector;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.net.withub.dataCollector.common.model.TCollectorConfig;
import cn.net.withub.dataCollector.common.model.TCollectorData;
import cn.net.withub.dataCollector.common.utils.CreateNewKey;
import cn.net.withub.dataCollector.common.utils.HTMLSpirit;
import cn.net.withub.dataCollector.web.service.CollectorService;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.regex.Pattern;
import net.sf.json.JSONObject;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.web.context.WebApplicationContext;

/**
 * Crawler for the "要闻" (top news) section of the external Tencent News site
 * (news.qq.com): collects article links from the list page, extracts article
 * metadata/content from detail pages, and downloads embedded images.
 */
public class CollectorUtilsTx extends CrawlerBase {
    public CollectorUtilsTx(String crawlPath, boolean autoParse, WebApplicationContext wac, TCollectorConfig config) {
        super(crawlPath, autoParse, wac, config);
        webAppliction = wac;
        if (webAppliction != null) collectorService = (CollectorService) webAppliction.getBean("collectorService");
        if (config != null) tCollectorConfig = config;
        sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    }
    public void visit(Page page, CrawlDatums crawlDatums) {
        String url = page.url();
        page.charset("GBK");//腾讯新闻不是utf-8编码???  utf-8全是乱码....
        //String html1 = page.response().decode("GBK");
        String nextUrl = "";
        String title = "", fbsj = "", djcs = "", xxly = "";//信息来源
        log.info("--------------------------URL:" + url);
        try {
            if (page.matchUrl("http://news.qq.com/")) { //腾讯新闻
                String modu = page.select(".title  .current a").text().trim();
                if (!"要闻".equals(modu)) {
                    log.info(modu);
                    return;
                }
                //第一页默认要闻
                Elements elements = page.select(" .major .Q-tpList .text em a");
                for (Element e : elements) {
                    nextUrl = e.attr("abs:href");
                    title = e.text();
                    if (Pattern.matches("http://new.qq.com/zt/template.*", nextUrl)) {//专题新闻  爬取不到数据
                        //crawlDatums.add(new CrawlDatum(nextUrl, "ztNews").meta("title", title));
                        continue;
                    } /*else if (Pattern.matches("http://new.qq.com/.*.html", nextUrl)) {
                        //title = e.text();
                    }*/
                    TCollectorData tCollectorData = (TCollectorData) collectorService.load(TCollectorData.class, "title ='" + title + "'");
                    if(tCollectorData==null){
                         crawlDatums.add(new CrawlDatum(nextUrl, "newsDetail").meta("title", title).meta("preUrL", url));
                    }
                }
            } else if (page.matchType("newsDetail")) {//单条新闻详情页面
                //Elements element = page.select("#LeftTool .left-stick-wp");//js加载的   无法获取
                Elements element = page.select("script");
                String paramStr = "";
                for (Element e : element) {
                    if (e.html().contains("window.DATA")) {
                        paramStr = e.html();
                        break;
                    }
                }
                Elements contentEle = page.select(".content-article");
                if(contentEle.size()==0)return;
                String content = contentEle.outerHtml();
                String text = HTMLSpirit.getTextFromTHML(content);
                if("".equals(text.trim())) return;//无文字  基本全是视频
                paramStr = paramStr.substring(14);//去掉window.DATA=
                JSONObject object = JSONObject.fromObject(paramStr);
                fbsj = getStr(object.get("pubtime"));
                xxly = getStr(object.get("media"));
                title = getStr(page.meta("title"));
                Elements imgs = contentEle.select("img[src]");
                String id = new CreateNewKey().createId();
                //遍历所有html中所有图片
                for (Element img : imgs) {
                    String src = img.attr("abs:src");
                    //此处图片地址包含中文会报错
                    src = fmtUrl(src);
                    log.info("img url:\n" + src);
                    //腾讯网图片地址都是以0结尾.无图片格式   此处保存抛出异常
                    //String imageName = src.substring(src.lastIndexOf("/") + 1, src.length());
                    String imageName = src.substring(src.indexOf("/0/")+3, src.lastIndexOf("/"))+".jpg";
                    String serverFilePath = "";
                    try {
                        //serverFilePath = HttpClientUtils.getImage(src,"1");
                        //img.attr("src", serverFilePath);
                        serverFilePath = FtpUtil.getImage(src, "1");
                        saveElement(id, imageName, src, serverFilePath);
                    } catch (IOException e) {
                        log.error("出错src:" + src + ";父级路径:" + url);
                        e.printStackTrace();
                    }
                }
                String preUrL = getStr(page.meta("preUrL"));
                int zt = 1;
                try {
                    //保存TCollectorData
                    saveData(id, title, fbsj, xxly, "html", url, "", "", "", "");
                    //保存TCollectorContent
                    saveContent(id,content,text);
                    //保存最大发布时间
                    saveMaxFbsj(fbsj);
                    zt = 1;
                } catch (Exception e) {
                    zt = 0;
                    e.printStackTrace();
                } finally {
                    saveLog(title, fbsj, preUrL, url, zt);
                }
            } else if (page.matchType("ztNews")) {
                title = getStr(page.meta("title"));
                Elements elements = page.select(".content .item-box ul li h3 a");
                for (Element e : elements) {
                    String ttt = e.text();
                    nextUrl = e.attr("abs:href");
                    if (title.equals(e.text())) {
                        crawlDatums.add(new CrawlDatum(nextUrl, "newsDetail").meta("title", title).meta("preUrL", url));
                    }
                }
            }

        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
