package com.undsf.comikamij.fzdm;

import com.undsf.comikamij.core.BaseCrawler;
import com.undsf.comikamij.core.entities.Chapter;
import com.undsf.comikamij.core.entities.Comic;
import com.undsf.comikamij.core.entities.Page;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.util.*;
import java.util.regex.Pattern;

/**
 * Created by Arathi on 2017/4/7.
 */
@Deprecated
/**
 * Crawler for manhua.fzdm.com: scrapes the comic index, per-comic tables of
 * contents, and individual page images using Jsoup.
 *
 * @deprecated The site layout this crawler targets is no longer maintained;
 *             kept for reference only.
 */
@Deprecated
public class FzdmCrawler extends BaseCrawler {
    private static final Logger logger = LogManager.getLogger(FzdmCrawler.class);

    public static final String DOMAIN = "manhua.fzdm.com";
    public static final String URL_INDEX = "http://" + DOMAIN;
    public static final Pattern PATTERN_NUMBER = Pattern.compile("\\d+");

    /** Shared cache of comics discovered by {@link #crawlIndex()}, keyed by comic id. */
    protected static Map<String, Comic> ComicCache = new HashMap<>();

    /** Per-instance cache of pages for the chapter currently being crawled, keyed by page number. */
    protected Map<Integer, Page> pageCache;

    public FzdmCrawler() {
        super();
        pageCache = new TreeMap<>();
    }

    /**
     * Crawls the site index page and extracts every listed comic.
     * Each comic's id is taken from its link href (trailing '/' stripped) and
     * the result is also stored in {@link #ComicCache}.
     *
     * @return the comics found; empty on network failure (never {@code null})
     */
    @Override
    public List<Comic> crawlIndex() {
        List<Comic> comics = new ArrayList<>();
        try {
            String html = client.get(URL_INDEX);
            Document doc = Jsoup.parse(html);
            String selector = "div#mhmain div.round";
            Elements rounds = doc.select(selector);
            for (Element round : rounds) {
                // The last <a> in each "round" div is the comic's own link;
                // earlier anchors are category/navigation links.
                Element a = round.select("a").last();
                if (a == null) {
                    continue;
                }
                Comic comic = new Comic();
                comic.name = a.text();
                comic.id = a.attr("href");
                if (comic.id.endsWith("/")) {
                    comic.id = comic.id.substring(0, comic.id.length() - 1);
                }
                comics.add(comic);
                ComicCache.put(comic.id, comic);
            }
        }
        catch (IOException ex) {
            // Log through the framework instead of printStackTrace(), matching crawlPage().
            logger.error("Failed to crawl index " + URL_INDEX, ex);
        }
        return comics;
    }

    /**
     * Crawls a comic's table of contents and extracts its chapter list.
     *
     * @param id the comic id as produced by {@link #crawlIndex()}
     * @return the chapters found; empty on network failure (never {@code null})
     */
    @Override
    public List<Chapter> crawlTOC(String id) {
        String tocURL = URL_INDEX + "/" + id + "/";
        List<Chapter> chapters = new ArrayList<>();
        try {
            String html = client.get(tocURL);
            Document doc = Jsoup.parse(html);
            String selector = "#content li a";
            Elements as = doc.select(selector);

            for (Element a : as) {
                Chapter chapter = new Chapter();
                chapter.name = a.text();
                // Chapter hrefs are relative to the TOC page.
                chapter.referrer = tocURL + a.attr("href");
                chapters.add(chapter);
            }
        }
        catch (IOException ex) {
            logger.error("Failed to crawl TOC " + tocURL, ex);
        }
        return chapters;
    }

    /**
     * Crawls all pages of a chapter.
     * <p>NOTE(review): not yet implemented — it only resets the page cache.
     *
     * @param comic   the comic id
     * @param chapter the chapter id
     * @return an empty list (implementation pending; never {@code null})
     */
    @Override
    public List<Page> crawlPagesOfChapter(String comic, String chapter) {
        pageCache.clear();
        // Return an empty list rather than null so callers can iterate safely.
        return Collections.emptyList();
    }

    /**
     * Crawls a single page of a chapter and extracts its image URL.
     * Marks {@code page.lastPage} when the navigation bar indicates the final page.
     *
     * @param referrer the page URL (also recorded as the image's referrer)
     * @return the populated page, or {@code null} on network failure or if the
     *         expected elements are missing from the document
     */
    public Page crawlPage(String referrer) {
        Page page = new Page();
        page.referrer = referrer;

        try {
            String html = client.get(referrer);
            Document doc = Jsoup.parse(html);
            Element navigation = doc.select("div.navigation").first();
            if (navigation == null) {
                logger.error("Navigation bar not found on page " + referrer);
                return null;
            }
            // "最后一页了" ("this is the last page") appears in the nav bar on the final page.
            if (navigation.text().contains("最后一页了")) {
                page.lastPage = true;
            }

            Element pic = doc.select("img#mhpic").first();
            if (pic == null) {
                logger.error("Page image (img#mhpic) not found on page " + referrer);
                return null;
            }
            page.url = pic.attr("src");

            return page;
        }
        catch (IOException ex) {
            logger.error("Failed to crawl page " + referrer, ex);
        }

        return null;
    }
}
