package ua.com.globallogic.basecamp.krasnyanskiy.webcrawler;


import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Simple recursive web crawler: starting from a seed URL it downloads the page,
 * saves its HTML under the {@code Pages/} directory as {@code pageN.html}, and
 * follows every {@code <a href>} and {@code <link href>} up to the given depth.
 *
 * <p>Not thread-safe: crawl state (visited set, page counter) is unsynchronized.
 */
public class Crawler {

    /** All absolute URLs discovered across the whole crawl, in discovery order. */
    List<String> result;
    FileWriter fileWriter;
    /** Output directory for saved pages ({@code Pages/}). */
    File dir;
    /** 1-based index used to name the next saved page file. */
    int pageCounter;

    // URLs already fetched in this crawl. Web link graphs are cyclic, so
    // without this the recursion in download() would never terminate.
    private final Set<String> visited = new HashSet<String>();

    public Crawler() {
        result = new ArrayList<String>();
        pageCounter = 1;
        dir = new File("Pages");
        // Best-effort: a false return just means the directory already exists
        // (or creation failed, which surfaces later when writing pages).
        dir.mkdirs();
    }

    /**
     * Downloads {@code startURL}, saves its HTML, and recursively follows the
     * links found on it until {@code depth} levels have been fetched.
     *
     * @param startURL absolute URL of the page to fetch
     * @param depth    number of levels to crawl; {@code 1} fetches only the start page
     * @throws IOException if a fetched page cannot be written to disk
     */
    public void download(String startURL, int depth) throws IOException {
        // Skip pages we have already fetched — prevents infinite recursion on
        // circular links and redundant re-downloads of shared pages.
        if (!visited.add(startURL)) {
            return;
        }

        Document doc;
        try {
            doc = Jsoup.connect(startURL).get();
        } catch (Exception e) {
            // Best-effort crawl: an unreachable or malformed page is logged
            // and skipped rather than aborting the whole crawl. (Broad catch
            // because jsoup also throws runtime exceptions for bad URLs.)
            e.printStackTrace();
            return;
        }

        List<String> urls = new ArrayList<String>();
        // Stylesheet/resource links first, then anchors — same order as before.
        collectHrefs(doc.select("link[href]"), urls);
        collectHrefs(doc.select("a[href]"), urls);

        buildHTML(doc.outerHtml());

        if (--depth > 0) {
            for (String url : urls) {
                download(url, depth);
            }
        }
    }

    /**
     * Copies the absolute href of every element into {@code target} and the
     * crawl-wide {@link #result} list, skipping hrefs that cannot be resolved
     * to an absolute URL (jsoup returns {@code ""} for those).
     */
    private void collectHrefs(Elements elements, List<String> target) {
        for (Element element : elements) {
            String url = element.attr("abs:href");
            if (!url.isEmpty()) {
                target.add(url);
                result.add(url);
            }
        }
    }

    /**
     * Writes one page's HTML to {@code Pages/pageN.html} and advances the counter.
     *
     * @param doc the full HTML of the page
     * @throws IOException if the file cannot be created or written
     */
    private void buildHTML(String doc) throws IOException {
        File file = new File(dir, "page" + pageCounter + ".html");
        // FileWriter creates (or truncates) the file itself; no createNewFile needed.
        fileWriter = new FileWriter(file);
        try {
            fileWriter.write(doc);
        } finally {
            // Close even if write() throws, so the file handle never leaks.
            fileWriter.close();
        }
        pageCounter++;
    }
}