import json
from urllib.parse import urljoin

import requests
from lxml import etree


def fetch(url):
    """Fetch *url* and return the response body as text.

    Returns '' on any failure: non-2xx/3xx status, connection error,
    DNS failure, or timeout — callers treat '' as "page unavailable".
    """
    try:
        # Without a timeout, a stalled server would hang the scraper forever.
        r = requests.get(url, timeout=10)
    except requests.RequestException:
        # Network-level failures (connection refused, timeout, DNS, ...)
        # are reported the same way as HTTP errors: empty string.
        return ''
    return r.text if r.ok else ''


def parse(html, base_url):
    """Extract quote records from one page of HTML.

    html: page source for one listing page.
    base_url: site root, used to resolve the "Next" link.

    Returns (next_url, records) where next_url is the absolute URL of the
    next page (or None on the last page) and records is a list of dicts
    with keys 'quote', 'author' (strings, or None if missing) and
    'tags' (list of tag strings).
    """
    tree = etree.HTML(html)
    records = []
    for div in tree.xpath('//div[@class="quote"]'):
        # xpath() returns lists; take the first match (or a sensible
        # default) so records hold scalars instead of one-element lists.
        quote = div.xpath('.//span[@class="text"]/text()')
        author = div.xpath('.//small[@class="author"]/text()')
        tags = div.xpath('.//meta[@class="keywords"]/@content')
        record = {
            'quote': quote[0] if quote else None,
            'author': author[0] if author else None,
            # The keywords meta holds one comma-separated string, e.g.
            # "change,deep-thoughts"; split it into an actual tag list.
            'tags': tags[0].split(',') if tags and tags[0] else [],
        }
        records.append(record)
    next_url = get_next_url(tree, base_url)
    return next_url, records


def get_next_url(tree, base_url):
    """Return the absolute URL of the "Next" pagination link, or None.

    tree: parsed lxml element tree of the current page.
    base_url: site root used to resolve the link's href.
    """
    hrefs = tree.xpath('//a[contains(text(),"Next")]/@href')
    if not hrefs:
        # No "Next" link: this is the last page.
        return None
    # urljoin resolves absolute, root-relative, and page-relative hrefs;
    # plain string concatenation only handled root-relative ones.
    return urljoin(base_url, hrefs[0])


if __name__ == '__main__':
    base_url = 'http://quotes.toscrape.com'
    url = 'http://quotes.toscrape.com/page/1/'
    all_records = []

    # Follow "Next" links until the last page (parse returns None) or a
    # page fails to download.
    while url:
        html = fetch(url)
        if not html:
            print('failed to fetch page: %s' % url)
            break

        url, records = parse(html, base_url)
        all_records.extend(records)

    # Explicit utf-8: the default encoding is locale-dependent and can
    # raise on non-ASCII quote text on some platforms. ensure_ascii=False
    # keeps that text readable in the output file.
    with open('quotes.jl', 'w', encoding='utf-8') as ofile:
        for record in all_records:
            ofile.write(json.dumps(record, ensure_ascii=False) + '\n')
