package com.scraper.articles.seekingalpha;

import com.gargoylesoftware.htmlunit.WebClient;
import com.scraper.articles.BaseScraper;
import com.scraper.articles.domain.Article;
import com.scraper.articles.exception.ScraperException;
import org.jsoup.HttpStatusException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class SeekingAlphaScraper extends BaseScraper<Article> {

    /** Browser-like User-Agent sent with every request so the site serves the full page. */
    public static final String USER_AGENT = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36";

    private static final String BASE_URL = "http://seekingalpha.com";
    /** Cap on HTTP-status retries; the old code recursed without bound on a failing URL. */
    private static final int MAX_RETRIES = 5;
    /** Pause between retries after an HTTP status error, in milliseconds. */
    private static final long RETRY_DELAY_MS = 2000L;
    /** Connect/read timeout for jsoup requests, in milliseconds. */
    private static final int TIMEOUT_MS = 20000;

    private final WebClient webClient;

    /**
     * @param element   anchor element (passed to {@link BaseScraper}) whose {@code href}
     *                  identifies the article to fetch
     * @param webClient HtmlUnit client retained for use by the scraper framework
     */
    public SeekingAlphaScraper(Element element, WebClient webClient) {
        super(element);
        this.webClient = webClient;
    }

    /**
     * Extracts an {@link Article} from a fetched Seeking Alpha article page.
     * Selectors target the page's microdata markup (itemprop attributes).
     *
     * @param document parsed article page
     * @return populated article; fields whose selector matches nothing are empty strings
     */
    @Override
    protected Article parseDocument(Document document) {
        Article article = new Article();
        article.setUrl(document.location());

        // selectFirst + null guard: the old title.get(0) threw
        // IndexOutOfBoundsException on pages without the headline element.
        Element title = document.selectFirst("#page_header span[itemprop=headline]");
        article.setTitle(title != null ? title.text() : "");

        Elements body = document.select("#article_body");
        article.setHtmlText(body.html());
        article.setText(body.text());

        List<String> imageUrls = new ArrayList<>();
        for (Element image : body.select("img")) {
            imageUrls.add(image.attr("src"));
        }
        article.setImages(imageUrls);

        article.setDate(document.select(".article_info_pos span[itemprop=datePublished]").text());
        article.setAuthor(document.select(".author_name").text());

        return article;
    }

    /**
     * Fetches the article page linked by {@code element}'s {@code href}, retrying up to
     * {@link #MAX_RETRIES} times on HTTP status errors (rate limiting, transient 5xx).
     *
     * @param element anchor element carrying a site-relative {@code href}
     * @return the parsed article page
     * @throws ScraperException on unrecoverable I/O failure, on interruption while
     *                          waiting to retry, or when all retries are exhausted
     */
    @Override
    protected Document getDocument(Element element) {
        String href = BASE_URL + element.attr("href");
        System.out.println("HREF: " + href);

        HttpStatusException lastStatusError = null;
        // Bounded retry loop replaces the old unbounded recursion, which looped
        // forever (and risked StackOverflowError) on a persistently failing URL.
        for (int attempt = 0; attempt < MAX_RETRIES; attempt++) {
            try {
                return Jsoup.connect(href).userAgent(USER_AGENT).timeout(TIMEOUT_MS).get();
            } catch (HttpStatusException e) {
                lastStatusError = e;
                try {
                    Thread.sleep(RETRY_DELAY_MS);
                } catch (InterruptedException interrupted) {
                    // Restore the interrupt flag so callers can observe the interruption.
                    Thread.currentThread().interrupt();
                    throw new ScraperException(interrupted);
                }
            } catch (IOException e) {
                throw new ScraperException(e);
            }
        }
        // All retries exhausted — surface the last HTTP status failure.
        throw new ScraperException(lastStatusError);
    }
}
