package cn.net.withub.dataCollector.web.collector;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;
import cn.net.withub.dataCollector.common.editor.DateUtil;
import cn.net.withub.dataCollector.common.model.TCollectorConfig;
import cn.net.withub.dataCollector.common.model.TCollectorContent;
import cn.net.withub.dataCollector.common.model.TCollectorData;
import cn.net.withub.dataCollector.common.model.TCollectorElement;
import cn.net.withub.dataCollector.common.model.TCollectorLog;
import cn.net.withub.dataCollector.common.utils.CreateNewKey;
import cn.net.withub.dataCollector.common.utils.HTMLSpirit;
import cn.net.withub.dataCollector.web.service.CollectorService;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.log4j.Logger;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.web.context.WebApplicationContext;

/**
 * Crawler for the Supreme People's Court intranet news site (最高法内网).
 *
 * <p>Visits list pages matching {@code http://192.0.0.9/views/.*&#47;index\d*.fhtml},
 * schedules each article's detail page, downloads inline images, and persists
 * article/content/image rows through {@link CollectorService}. Crawls are
 * incremental: only articles newer than (or same-day as, and not yet stored)
 * the max publish date recorded on {@link TCollectorConfig} are fetched.
 *
 * <p>NOTE(review): {@code visit} is invoked concurrently by WebCollector worker
 * threads, so no mutable state may be shared across calls without synchronization.
 */
public class CollectorUtilsZgfnw extends BreadthCrawler {

    //@Resource
    // Persistence service; resolved from the Spring context in the constructor.
    private CollectorService collectorService;

    //@Resource
    // Spring web application context used to look up the service bean.
    private WebApplicationContext webAppliction;

    // Crawl configuration: court info plus the max publish date already crawled.
    private TCollectorConfig tCollectorConfig;

    private final Logger log = Logger.getLogger(this.getClass());

    // Matches one or more consecutive CJK Unified Ideographs; compiled once.
    private static final Pattern CHINESE_PATTERN = Pattern.compile("([\u4e00-\u9fa5]+)");

    /**
     * Returns a fresh {@code yyyy-MM-dd} formatter.
     *
     * <p>{@link SimpleDateFormat} is not thread-safe and {@code visit} runs on
     * multiple crawler threads, so a new instance is created per use instead of
     * sharing a single field (the previous shared field could silently corrupt
     * parsed/formatted dates under load).
     */
    private SimpleDateFormat dateFormat() {
        return new SimpleDateFormat("yyyy-MM-dd");
    }

    /**
     * NOTE(review): despite the constructor-like name this is a plain method —
     * it declares a {@code void} return type, so the compiler treats it as an
     * ordinary setter, never as a constructor. Kept for backward compatibility;
     * use the real constructor below instead.
     *
     * @param collectorService service used to persist crawled entities
     */
    public void CollectorUtils(CollectorService collectorService){
        this.collectorService = collectorService;
    }

    /**
     * @param crawlPath WebCollector berkeley-DB crawl state directory
     * @param autoParse whether WebCollector should auto-extract links
     * @param wac       Spring context holding the "collectorService" bean (may be null in tests)
     * @param config    crawl configuration row for this site (may be null in tests)
     */
    public CollectorUtilsZgfnw(String crawlPath, boolean autoParse, WebApplicationContext wac, TCollectorConfig config) {
        super(crawlPath, autoParse);
        webAppliction = wac;
        if (webAppliction != null) {
            collectorService = (CollectorService) webAppliction.getBean("collectorService");
        }
        if (config != null) {
            tCollectorConfig = config;
        }
    }

    /**
     * Callback invoked by WebCollector for every fetched page.
     *
     * <p>Dispatches on page kind: news list pages schedule detail pages and the
     * "next page" link; detail pages persist the article, its body text and its
     * images; the legacy "img" type persists a standalone image.
     */
    public void visit(Page page, CrawlDatums crawlDatums) {
        String url = page.url();
        String nextUrl = "";
        String title = "", fbsj = "", djcs = "", xxfb = "";
        Date date = new Date();
        SimpleDateFormat sdf = dateFormat();
        log.info("--------------------------URL:" + url);
        try{
             if (page.matchUrl("http://192.0.0.9/views/.*/index\\d*.fhtml")) { // news list page
                 Elements elements = page.select(".list-tab tr");
                 boolean isNextPage = true; // follow the "next page" link unless we hit old content
                 for (Element e : elements) {
                     String text = e.text();
                     // Skip the table header row ("标题"=title, "点击量"=click count).
                     if (text.contains("标题") && text.contains("点击量")) continue;
                     nextUrl = e.child(0).child(0).attr("abs:href");
                     djcs = e.child(1).text();   // click count
                     xxfb = e.child(2).text();   // publisher
                     fbsj = e.child(3).text();   // publish date, yyyy-MM-dd
                     // List titles are ellipsized when long; the stored title comes from the detail page.
                     title = e.child(0).text();
                     // Long titles may wrap (contain line breaks) on the detail page, breaking exact
                     // matches for incremental crawls — keep only a short prefix for a LIKE lookup.
                     title = (title.length() > 6 ? title.substring(0, 6) : title);

                     try {
                         Date lastFbsj = tCollectorConfig.getFbsj();
                         if (lastFbsj == null) {
                             // Nothing crawled yet: take everything.
                             // BUGFIX: previously sdf.format(null) threw an NPE before this
                             // null check could run, so the first-run branch never executed.
                             crawlDatums.add(new CrawlDatum(nextUrl, "newsDetail").meta("xxfb",xxfb).meta("djcs",djcs).meta("fbsj",fbsj).meta("preUrL",url));
                         } else {
                             int compare = DateUtil.compareDate(fbsj, sdf.format(lastFbsj));
                             if (compare > 0) {
                                 // Newer than anything stored: crawl it.
                                 crawlDatums.add(new CrawlDatum(nextUrl, "newsDetail").meta("xxfb",xxfb).meta("djcs",djcs).meta("fbsj",fbsj).meta("preUrL",url));
                             } else if (compare == 0) {
                                 // Same day as the stored max publish date: crawl only if no row with this
                                 // date and title prefix exists yet. Single quotes are escaped so the
                                 // concatenated condition cannot be broken by quoted titles (injection risk —
                                 // ideally CollectorService would take bound parameters instead).
                                 TCollectorData tCollectorData = (TCollectorData) collectorService.load(
                                         TCollectorData.class,
                                         " fbsj='" + fbsj.replace("'", "''") + "' and title LIKE'%" + title.replace("'", "''") + "%'");
                                 if (tCollectorData == null) {
                                     crawlDatums.add(new CrawlDatum(nextUrl, "newsDetail").meta("xxfb",xxfb).meta("djcs",djcs).meta("fbsj",fbsj).meta("preUrL",url));
                                 } else {
                                     isNextPage = false; // already stored: reached crawled territory
                                 }
                             } else {
                                 isNextPage = false; // older entry: stop paging
                             }
                         }
                     } catch (Exception e1) {
                         log.error("failed to schedule detail page " + nextUrl, e1);
                     }
                 }

                 // Schedule the next list page unless we already reached crawled content.
                 String src = "";
                 try {
                     if (isNextPage) {
                         Elements pageElements = page.select(".page-tab tr td div a[href]");
                         for (Element element : pageElements) {
                             String linkText = element.text().trim();
                             if (linkText.equals("下一页")) { // "next page"
                                 src = element.attr("abs:href");
                                 crawlDatums.add(new CrawlDatum(src));
                             }
                         }
                     }
                 } catch (Exception e) {
                     saveLog("", "", url, src, 0);
                     log.error("failed to schedule next list page from " + url, e);
                 }
             } else if (page.matchType("newsDetail")) { // single article detail page
                 Elements elements = page.select("#FontSize");
                 Elements imgs = elements.select("img[src]");
                 String id = new CreateNewKey().createId();
                 // Download every image in the article body and rewrite its src to the local copy.
                 for (Element img : imgs) {
                     String src = img.attr("abs:src");
                     // Image URLs containing Chinese characters must be encoded before fetching.
                     src = fmtUrl(src);
                     log.info("img url:\n" + src);
                     String imageName = src.substring(src.lastIndexOf("/") + 1);
                     log.info("imageName:\n" + imageName);
                     String serverFilePath = "";
                     try {
                         serverFilePath = HttpClientUtils.getImage(src);
                         img.attr("src", serverFilePath);
                         TCollectorElement tCollectorElement = new TCollectorElement();
                         tCollectorElement.setId(new CreateNewKey().createId());
                         tCollectorElement.setDataId(id);
                         tCollectorElement.setTitle(imageName);
                         tCollectorElement.setUrl(src);
                         tCollectorElement.setWjlj(serverFilePath);
                         tCollectorElement.setSystime(new Date());
                         collectorService.save(tCollectorElement);
                     } catch (IOException e) {
                         log.error("出错src:" + src + ";父级路径:" + url, e);
                     }
                 }
                 String content = elements.html();
                 log.info("-----------" + content);
                 title = page.select("#tit").text();
                 fbsj = getStr(page.meta("fbsj"));
                 xxfb = getStr(page.meta("xxfb"));
                 djcs = getStr(page.meta("djcs"));
                 String preUrL = getStr(page.meta("preUrL"));
                 String text = HTMLSpirit.getTextFromTHML(content);
                 int zt = 1; // crawl-log status: 1 = success, 0 = failure
                 try {
                     // Persist the article row.
                     TCollectorData tCollectorData = new TCollectorData();
                     tCollectorData.setId(id);
                     tCollectorData.setConfigId(tCollectorConfig.getId());
                     tCollectorData.setTitle(title);
                     tCollectorData.setFbsj(sdf.parse(fbsj));
                     tCollectorData.setXxly("");
                     tCollectorData.setXxfb(xxfb);
                     tCollectorData.setBjsh("");
                     tCollectorData.setDjcs(djcs);
                     tCollectorData.setWjgs("html");
                     tCollectorData.setUrl(url);
                     tCollectorData.setFydm(tCollectorConfig.getFydm());
                     tCollectorData.setFymc(tCollectorConfig.getFymc());
                     tCollectorData.setSystime(date);
                     collectorService.save(tCollectorData);
                     // Persist the article body (raw HTML plus extracted plain text).
                     TCollectorContent tCollectorContent = new TCollectorContent();
                     tCollectorContent.setId(new CreateNewKey().createId());
                     tCollectorContent.setContent(content);
                     tCollectorContent.setSystime(date);
                     tCollectorContent.setDataId(id);
                     tCollectorContent.setText(text);
                     collectorService.save(tCollectorContent);
                     // Advance the max publish date on the config row.
                     // NOTE(review): load-check-save without locking is racy when multiple
                     // crawler threads finish detail pages at once — TODO serialize this update.
                     TCollectorConfig config = (TCollectorConfig) collectorService.load(
                             TCollectorConfig.class, " id='" + tCollectorConfig.getId() + "'");
                     if (config.getFbsj() == null || DateUtil.compareDate(fbsj, sdf.format(config.getFbsj())) > 0) {
                         config.setFbsj(sdf.parse(fbsj));
                         collectorService.save(config);
                     }
                     zt = 1;
                 } catch (Exception e) {
                     zt = 0;
                     log.error("failed to persist news detail " + url, e);
                 } finally {
                     saveLog(title, fbsj, preUrL, url, zt);
                 }
             } else if (page.matchType("img")) { // standalone image datum (no longer scheduled; kept for old queues)
                 String dataId = getStr(page.meta("dataId"));
                 String imageName = url.substring(url.lastIndexOf("/") + 1);
                 log.info("imageName:\n" + imageName);
                 String serverFilePath = "";
                 try {
                     serverFilePath = HttpClientUtils.getImage(url);
                     TCollectorElement tCollectorElement = new TCollectorElement();
                     tCollectorElement.setId(new CreateNewKey().createId());
                     tCollectorElement.setDataId(dataId);
                     tCollectorElement.setTitle(imageName);
                     tCollectorElement.setUrl(url);
                     tCollectorElement.setWjlj(serverFilePath);
                     tCollectorElement.setSystime(new Date());
                     collectorService.save(tCollectorElement);
                 } catch (IOException e) {
                     log.error("failed to download image " + url, e);
                 }
             }
        } catch (Exception e) {
            log.error("visit failed for " + url, e);
        }
    }

    /**
     * Writes a crawl-log row recording one crawl attempt.
     *
     * @param title    article title (may be empty)
     * @param fbsj     publish date string, yyyy-MM-dd (unparseable values are logged and left null)
     * @param firstUrl the list page the link came from
     * @param tqUrl    the crawled detail URL
     * @param zt       status: 1 = success, 0 = failure
     */
    public void saveLog(String title, String fbsj, String firstUrl, String tqUrl, int zt){
        try {
            log.info("savelog........."+fbsj);
            TCollectorLog tCollectorLog = new TCollectorLog();
            tCollectorLog.setId(new CreateNewKey().createId());
            tCollectorLog.setConfigId(tCollectorConfig.getId());
            tCollectorLog.setIp(tCollectorConfig.getIp());
            tCollectorLog.setModuleName(tCollectorConfig.getModuleName());
            tCollectorLog.setFydm(tCollectorConfig.getFydm());
            tCollectorLog.setFymc(tCollectorConfig.getFymc());
            tCollectorLog.setTitle(title);
            try {
                // fbsj may legitimately be "" or "无" here; a parse failure only loses the date.
                tCollectorLog.setFbsj(dateFormat().parse(fbsj));
            } catch (Exception e) {
                log.error("unparseable fbsj in crawl log: " + fbsj, e);
            }
            tCollectorLog.setZt(zt);
            tCollectorLog.setFirstUrl(firstUrl);
            tCollectorLog.setTqUrl(tqUrl);
            tCollectorLog.setSysTime(new Date());

            collectorService.save(tCollectorLog);
        } catch (Exception e) {
            log.error("failed to save crawl log for " + tqUrl, e);
        }
    }

    /** Returns the object's string form, or "无" ("none") when null/empty. */
    String getStr(Object obj){
        return obj == null || "".equals(obj.toString()) ? "无" : obj.toString();
    }

    /** Returns the object parsed as an int, or 0 when null/empty. */
    Integer getInt(Object obj){
        return obj == null || "".equals(obj.toString()) ? 0 : Integer.parseInt(obj.toString());
    }

    /**
     * Encodes whitespace and Chinese characters in a crawled URL so it can be fetched.
     *
     * <p>Known limitation (kept from the original): only the FIRST run of Chinese
     * characters is URL-encoded, and that encoding then replaces EVERY Chinese run
     * in the URL — correct only while URLs contain at most one such run.
     *
     * @param url raw URL; null or empty yields "" (null formerly threw an NPE)
     * @return the encoded URL
     * @throws UnsupportedEncodingException never in practice ("utf-8" is always supported)
     */
    public String fmtUrl(String url) throws UnsupportedEncodingException {
        if (url == null || "".equals(url)) return "";
        // Matcher is bound to the original string; the whitespace substitution below
        // does not affect what it finds.
        Matcher m = CHINESE_PATTERN.matcher(url);
        url = url.replaceAll("\\s+", "%20"); // spaces are invalid in URLs
        if (m.find()) {
            String mv = URLEncoder.encode(m.group(0), "utf-8");
            // Safe as a replacement string: URLEncoder output never contains '\' or '$'.
            url = url.replaceAll("[\u4e00-\u9fa5]+", mv);
        }
        return url;
    }

    /**
     * Returns whether {@code str} contains Chinese characters.
     *
     * <p>NOTE(review): the assignment to {@code str2} has no effect for callers —
     * Java passes references by value, so the encoded text is discarded. Kept
     * byte-compatible; callers should treat {@code str2} as ignored.
     */
    public static boolean isContainChinese(String str, String str2) throws UnsupportedEncodingException {
        Matcher m = CHINESE_PATTERN.matcher(str);
        if (m.find()) {
            str2 = m.group(0);
            str2 = URLEncoder.encode(str2, "utf-8");
            return true;
        }
        return false;
    }

}
