package com.dm.webCrawler.project.buss.service;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.rocks.BreadthCrawler;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.ZipUtil;
import com.dm.webCrawler.common.utils.AsponseUtil;
import com.dm.webCrawler.common.utils.StringUtils;
import com.dm.webCrawler.common.utils.file.XmlDetailUtil;
import com.dm.webCrawler.project.mapper.CrawlerIndexMapper;
import com.dm.webCrawler.project.mode.ZipDetialModel;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import javax.xml.bind.JAXBException;
import java.io.File;
import java.io.IOException;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.*;

/**
 * Content extractor for United News of India (uniindia.com).
 *
 * <p>Breadth-first crawler over the India news section. For every article it
 * builds a per-article folder containing a Word copy of the body and a
 * {@code detail.xml} descriptor, zips the folder into the output directory,
 * deletes the work folder, and records the article id in the crawler index
 * table so the same story is never exported twice.</p>
 */
@Component
public class UniindiaCrawler extends BreadthCrawler {

    /** Root work directory for per-article folders (configurable, defaults to "news"). */
    @Value("${lb.work-back:news}")
    private String workBack = "news";

    /** Directory that receives the finished zip archives. */
    @Value("${lb.out-zip-path:outZipPath}")
    private String outZipPath = "outZipPath";

    /**
     * Work directory derived from {@link #workBack} in the constructor.
     * NOTE(review): Spring performs {@code @Value} field injection AFTER the
     * constructor runs, so this field can hold the default instead of the
     * configured value; {@link #printDatas} therefore rebuilds the path from
     * {@code workBack} at use time and this field is kept only for compatibility.
     */
    private String baseDir;

    /** Source identifier used in index records, folder names and zip names. */
    private String typeName = "uniindia";

    /** Entry page of the India section; only Indian news is crawled. */
    String startPage = "http://www.uniindia.com/news/india/";

    List<String> seeds = new ArrayList<String>() {{
        add(startPage); // crawl Indian news only
    }};

    @Autowired
    private CrawlerIndexMapper crawlerIndexMapper;

    /**
     * Locale-independent parser for timestamps such as "Jan 5 2021 10:30AM".
     * Replaces the previous pattern "hh:ssaa", which fed the minutes into the
     * seconds field, and the month/AM-PM Chinese-replacement maps, which only
     * parsed correctly under a Chinese default JVM locale.
     */
    private static final DateTimeFormatter UNI_DATE_FORMAT =
            DateTimeFormatter.ofPattern("MMM d yyyy h:mma", Locale.ENGLISH);

    /** No-arg constructor used by Spring; crawl state is stored under "Uniindia". */
    public UniindiaCrawler() {
        this("Uniindia", true);
    }

    /**
     * Builds a RocksDB-backed crawler.
     * The RocksDB folder is {@code crawlPath}; it keeps the URL history and
     * related state. Different tasks must not share a crawlPath: two crawlers
     * running in parallel on the same crawlPath will corrupt each other.
     *
     * @param crawlPath folder used by RocksDB
     * @param autoParse whether new URLs are auto-detected from the configured regex
     */
    public UniindiaCrawler(String crawlPath, boolean autoParse) {
        super(crawlPath, autoParse);
        for (String seed : seeds) {
            this.addSeedAndReturn(seed).type("list");
        }
        // Detail pages look like http://www.uniindia.com/<slug>/india/news/<id>.html
        this.addRegex("http://www.uniindia.com/[\\s\\S]*?/india/news/+\\d+.html");
        setThreads(10);
        getConf().setTopN(10000);
        baseDir = workBack + "/" + typeName;
    }

    @Override
    public void visit(Page page, CrawlDatums next) {
        System.out.println(page.url());
        if (seeds.contains(page.url())) {
            // Seed page: queue every pagination link except the current one.
            Elements pageNodes = page.select("#ctl00_ContentPlaceHolder1_catnewsid div.holder a:not(.jp-current)");
            if (pageNodes != null && pageNodes.size() > 0) {
                for (Element pageNode : pageNodes) {
                    String href = pageNode.attr("href");
                    if (StringUtils.isNotEmpty(href))
                        next.add(startPage + href).type("list");
                }
            }
            return;
        } else if (!page.matchType("list")) {
            // Not a list page, so it matched the detail regex: extract the article.
            printDatas(page, next);
        }
        // Non-seed list pages fall through: autoParse discovers article links from them.
    }

    /**
     * Extracts a single article page into {@code <id>/<id>.doc} plus
     * {@code detail.xml}, zips the folder and registers the id in the index
     * so it is skipped on later crawls.
     *
     * @param page fetched article page
     * @param next next crawl batch (unused here, kept for symmetry with visit)
     */
    private void printDatas(Page page, CrawlDatums next) {
        ZipDetialModel model = new ZipDetialModel(); // export descriptor
        model.setHqsj(DateUtil.format(new Date(), "yyyy-MM-dd HH:mm:ss"));
        String url = page.url();
        String id = StringUtils.regexFind(url, "(\\d+).html", 1);
        if (crawlerIndexMapper.hasIndex(id, typeName) >= 1) {
            return; // already exported earlier
        }
        model.setWybs(id);
        // Rebuild the path from workBack here instead of using baseDir:
        // @Value injection runs after the constructor, so baseDir may still
        // hold the default rather than the configured work directory.
        String thisnewBaseDir = workBack + "/" + typeName + "/" + id;
        new File(thisnewBaseDir).mkdirs();
        model.setLy("印度联合新闻社");
        model.setFbmt("印度联合新闻社");
        model.setYz("英文");
        model.setGj("印度");
        model.setDq("南亚");
        model.setLydz(url);
        // No editor/author field is available on the page.

        // NOTE(review): first() throws NPE if the site markup changes; the
        // exception propagates to the framework, which treats the page as failed.
        String bt = page.select("#ctl00_ContentPlaceHolder1_storyContainer h1.storyheadline").first().text();
        model.setBt(bt);
        String dateStr = page.select("#ctl00_ContentPlaceHolder1_storyContainer span.storydate").first().text();
        try {
            model.setFbsj(DateUtil.format(dateStrConvert(dateStr), "yyyy-MM-dd HH:mm:ss"));
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("获取发布时间失败!");
        }
        // Article body: strip injected Google ad containers first.
        page.select("#ctl00_ContentPlaceHolder1_storyContainer span.storydetails div.google-auto-placed.ap_container").remove();
        String contentHtml = page.select("#ctl00_ContentPlaceHolder1_storyContainer span.storydetails", 0).html();
        File wordFile = buildWordFile(contentHtml, thisnewBaseDir, id);
        // Register the body document in the descriptor.
        model.getWJ().add(wordFile.getName());

        // Emit detail.xml, zip the folder, clean up and record the id.
        try {
            XmlDetailUtil.createXML(model, thisnewBaseDir + "/detail.xml");
            String outPath = outZipPath + "/" + typeName + "_" + id + ".zip";
            String basDirAbs = new File(thisnewBaseDir).getAbsolutePath();
            ZipUtil.zip(basDirAbs, new File(outPath).getAbsolutePath());
            FileUtil.del(basDirAbs);
            crawlerIndexMapper.appendIndex(id, typeName);
        } catch (JAXBException e) {
            e.printStackTrace();
            System.out.println("创建压缩文件失败");
        }
    }

    /**
     * Renders the article HTML into a Word (.doc) file.
     * Writes a temporary standalone HTML file, converts it via Aspose, then
     * deletes the temporary file.
     *
     * @param contentHtml    inner HTML of the article body
     * @param thisnewBaseDir per-article work directory
     * @param id             article id (used as the file stem)
     * @return handle to the produced .doc file (may not exist if conversion failed)
     */
    private File buildWordFile(String contentHtml, String thisnewBaseDir, String id) {
        File htmlPath = new File(thisnewBaseDir + "/" + id + ".html");
        try {
            htmlPath.createNewFile();
        } catch (IOException e) {
            e.printStackTrace();
            System.out.println("创建html文件失败！");
        }
        contentHtml = "<html lang=\"en\"><head></head><body>" + contentHtml + "</body></html>";
        FileUtil.writeString(contentHtml, htmlPath.getAbsolutePath(), "UTF-8");
        String wordFilename = id + ".doc";
        String wordPath = thisnewBaseDir + "/" + wordFilename;
        try {
            AsponseUtil.html2word(htmlPath.toString(), wordPath);
            htmlPath.delete();
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("创建word文档失败！");
        }
        return new File(wordPath);
    }

    /**
     * Converts a UNI timestamp such as "Posted at: Jan 5 2021 10:30AM" into a
     * {@link Date}, shifted forward by 150 minutes — presumably converting
     * IST (UTC+5:30) to UTC+8 / Beijing time; TODO confirm the intent.
     *
     * <p>Fix: the previous pattern {@code "hh:ssaa"} parsed the minutes into
     * the seconds field, so every publish time lost its minutes.</p>
     *
     * @param timeString raw text of the storydate element
     * @return adjusted publish time in the JVM default time zone
     */
    private Date dateStrConvert(String timeString) {
        timeString = timeString.replace("Posted at:", "").trim();
        // Split on any whitespace run so doubled spaces cannot shift the fields.
        String[] parts = timeString.split("\\s+");
        // parts: [0]=month abbrev, [1]=day, [2]=year, [3]=time with AM/PM suffix
        String normalized = parts[0] + " " + parts[1] + " " + parts[2] + " " + parts[3];
        LocalDateTime local = LocalDateTime.parse(normalized, UNI_DATE_FORMAT);
        long millis = local.atZone(ZoneId.systemDefault()).toInstant().toEpochMilli();
        return new Date(millis + 150 * 60 * 1000);
    }

    /** Standalone entry point: runs the crawl to depth 4 without Spring. */
    public static void main(String[] args) {
        UniindiaCrawler crawler = new UniindiaCrawler("UniindiaCrawler", true);
        try {
            crawler.start(4);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
