import lxml.html
import requests
import re
from functools import *
from toolz.curried import *
from languagetools import *
import article_ns
# https://toolz.readthedocs.io/en/latest/api.html

def build_addr(page_ix, from_date='01.01.2016', to_date='14.12.2016'):
    """Build the delfi.lt archive listing URL for a result-page index.

    page_ix   -- zero-based page index; the site's paging starts at 1,
                 so 1 is added before formatting.
    from_date -- start of the archive date range, dd.mm.yyyy.
    to_date   -- end of the archive date range, dd.mm.yyyy.

    Returns the archive listing URL as a string.
    """
    return ('http://www.delfi.lt/archive/index.php'
            '?fromd={0}'
            '&tod={1}'
            '&page={2}'.format(from_date, to_date, page_ix + 1))

def download_html(addr):
    """Fetch *addr* over HTTP and return the parsed lxml document root."""
    response = requests.get(addr)
    return lxml.html.fromstring(response.text)

def extract_article_urls(html):
    """Return the article hrefs listed on an archive search-results page."""
    xpath_expr = ('//div[@class="arch-search-list"]'
                  '/ol/li/*/a[@class="arArticleT"]/@href')
    return html.xpath(xpath_expr)

# Compiled once; raw string avoids the invalid '\?' escape warning and the
# host-name dots are now matched literally.
_ARTICLE_URL_RE = re.compile(
        r'http://www\.delfi\.lt/(.+?)/(.+?)/(?:(.+?)/)?.+?\?id=(.+?)$')

def parse_article_url(url):
    """Extract the category chain and article gid from a delfi.lt URL.

    url -- article URL shaped like
           http://www.delfi.lt/cat1/cat2[/cat3]/slug?id=GID

    Returns dict(category_chain=[cat1, cat2, (cat3)], gid=str).  The
    third category segment is optional; when absent its regex group is
    None and it is dropped from the chain.
    Raises AttributeError when the URL does not match the pattern.
    """
    cat1, cat2, cat3, gid = _ARTICLE_URL_RE.search(url).groups()
    category_chain = [cat for cat in (cat1, cat2, cat3) if cat is not None]
    return dict(category_chain=category_chain, gid=gid)

@curry
def when_empty(f, x):
    """Return *x*, unless it is empty — then return the result of f().

    Curried, so `when_empty(fallback)` can be used as a one-argument
    stage inside pipe().
    """
    return f() if is_empty(x) else x

def extr_first_para(html):
    """Extract the bold lead paragraph of an article, or None.

    Falls back to the article-lead/description markup when the legacy
    delfi-article-lead block is absent.
    """
    leads = html.xpath('//div[@class="delfi-article-lead"]/b')
    return pipe(
            leads,
            when_empty(lambda:
                html.xpath('//div[@id="article-lead" and @itemprop="description"]')),
            map(lambda el: el.text_content()),
            first_or_none)

def extr_body_tail(html):
    """Return the article body paragraphs (after the lead) as a list of strings.

    Falls back to the holder-div markup when no itemprop articleBody
    container is present.
    """
    paragraphs = html.xpath('//div[@itemprop="articleBody"]/p')
    return pipe(
            paragraphs,
            when_empty(lambda:
                html.xpath('//div[@class="holder"]/div[1]/p')),
            map(lambda p: p.text_content()),
            list)

def extr_source(html):
    """Return the article's source name, stripped, or None when missing."""
    nodes = html.xpath('//div[@class="delfi-source-name"]')
    names = map(lambda el: el.text_content().strip(), nodes)
    return first_or_none(names)

def extr_author(html):
    """Return the article author's name, stripped, or None.

    Prefers the <meta name="author"> tag; falls back to the visible
    delfi-author-name element when the meta tag is missing.
    """
    meta_authors = html.xpath('//meta[@name="author"]/@content')
    fallback = lambda: pipe(
            html.xpath('//div[@class="delfi-author-name" and @itemprop="author"]'),
            map(lambda el: el.text_content()))
    return pipe(
            meta_authors,
            when_empty(fallback),
            map(str.strip),
            first_or_none)

def extr_title(html):
    """Return the og:title meta content, or None when absent."""
    titles = html.xpath('//meta[@property="og:title"]/@content')
    return first_or_none(titles)

import iso8601

def extr_date(html):
    """Return the article's published date parsed from the page metadata,
    or None when no datePublished meta tag is present."""
    stamps = html.xpath('//meta[@itemprop="datePublished"]/@content')
    return pipe(stamps, map(iso8601.parse_date), first_or_none)

def extr_url(html):
    """Return the canonical URL declared in the page head, or None."""
    links = html.xpath('/html/head/link[@rel="canonical"]/@href')
    return first_or_none(links)

def obligatory(x):
    """Return *x*, insisting that it is present.

    Raises AssertionError when x is None.  Unlike a bare `assert`, this
    check is not stripped when Python runs with -O.
    """
    if x is None:
        raise AssertionError('obligatory value is None')
    return x

def html2article(html):
    """Assemble an article dict from a parsed article page.

    Combines the extracted lead paragraph, body, metadata, and the
    category chain / gid parsed from the canonical URL.  Only the URL is
    mandatory; the remaining extracted fields may be None when absent.
    """
    # obligatory() is used so pages where extraction still fails surface
    # loudly instead of silently producing None.
    _first_para = extr_first_para(html)
    _body_tail = extr_body_tail(html)
    # Prepend the lead paragraph to the body only when one was found.
    body = list(cons(_first_para, _body_tail)) \
            if is_some(_first_para) else _body_tail
    # Drop paragraphs that are too short to carry real text.
    body = pipe( body,
            filter(article_ns.para_long_enough),
            list )
    url = obligatory( extr_url(html) )
    date = extr_date(html)
    title = extr_title(html)
    author = extr_author(html)
    source = extr_source(html)
    # NOTE(review): this relies on a `pluck` accepting (mapping, *keys);
    # toolz.pluck has the signature pluck(ind, seqs) — presumably
    # languagetools overrides it.  Confirm which pluck is in scope.
    gid, category_chain = thread_first(
            parse_article_url(url),
            (pluck, 'gid', 'category_chain') )
    return dict( \
            source=source, has_first_para=is_some(_first_para),
            body=body, date=date, url=url, author=author, \
            category_chain=category_chain, gid=gid, title=title)

def is_video_only(article):
    """True when the article's top-level category is 'video'."""
    top_category = first(article['category_chain'])
    return equals('video')(top_category)

def is_non_article(article):
    """True for entries that should be discarded from the crawl results:
    video-only items and articles with too little text."""
    return is_video_only(article) or article_ns.has_too_little_text(article)

def download_pages_of_articles(n, workers_n=2):
    """Download the first *n* archive pages and return their articles.

    n         -- number of archive listing pages to crawl (indices 0..n-1).
    workers_n -- size of the multiprocessing pool used for the downloads.

    Returns a list of article dicts with video-only and too-short
    entries removed.
    """
    import multiprocessing as mp
    dl_urls_from_page_n = \
            compose(extract_article_urls, download_html, build_addr)
    dl_article_from_url = \
            compose(html2article, download_html)
    with mp.Pool(workers_n) as pool:
        # Distinct name for the parallel map instead of shadowing the
        # builtin / curried toolz `map` already in scope.
        pmap = curry(pool.map)
        return \
                pipe( range(n),
                        pmap(dl_urls_from_page_n),
                        concat,
                        pmap(dl_article_from_url),
                        remove(is_non_article),
                        list )

"""
    return \
            pipe( range(n),
                    map(build_addr),
                    map(download_html),
                    map(extract_article_urls),
                    concat,
                    map(download_html),
                    map(html2article),
                    list
                    )
"""

"""
pipe( build_addr(0),
        download_html,
        extract_article_urls,
        map(download_html),
        map(html2article) )


for html in article_htmls:
    for i in range(len(article_htmls)):
        try:
            html2article(html)
        except IndexError:
            print('stopped at {0}.'.format(i))
            raise

"""
