import scrapy
from bs4 import BeautifulSoup as BS
import json

from spider1.items import Spider1Item, QuotesItemLoader


class QuotesSpider(scrapy.Spider):
    """Spider for quotes.toscrape.com.

    ``start_requests`` dispatches pages 1-3 to :meth:`parse2`, which
    yields items built with ``QuotesItemLoader``.  :meth:`parse` is an
    alternative callback that dumps raw dicts to a ``quotes_<page>.jl``
    JSON-lines file instead of yielding items.
    """

    name = 'quotes'

    def start_requests(self):
        """Yield one request per page, routed to :meth:`parse2`.

        Fix: the original URL list repeated ``page/2/`` four times;
        each page is now requested exactly once.
        """
        urls = ['http://quotes.toscrape.com/page/%d/' % page
                for page in (1, 2, 3)]

        for url in urls:
            # Demonstration payload carried through to the callback
            # via response.meta.
            meta = dict(a=123, b=789)
            yield scrapy.Request(url, callback=self.parse2, meta=meta)

    def parse(self, response):
        """Write every quote on the page to ``quotes_<page>.jl``.

        The page number is taken from the last path segment of the URL.
        Each line is one JSON object with ``text``, ``author`` and
        ``tags`` keys (``tags`` is the comma-separated ``content`` of
        the keywords ``<meta>`` tag, or None when absent).
        """
        num = response.url.rstrip('/').split('/')[-1]
        # Context manager guarantees the file is closed even if an
        # extraction or write raises mid-loop.
        with open('quotes_%s.jl' % num, 'w') as ofile:
            for quote in response.xpath('//div[@class="quote"]'):
                text = quote.xpath('.//span[@class="text"]/text()').extract_first()
                author = quote.xpath('.//small[@class="author"]/text()').extract_first()
                tags = quote.xpath('.//meta[@class="keywords"]/@content').extract_first()
                record = dict(text=text, author=author, tags=tags)
                ofile.write(json.dumps(record) + '\n')

    def parse2(self, response):
        """Yield one loaded item per quote found on the page."""
        # Use the spider's logger (lazy %-formatting) rather than print,
        # so the output respects Scrapy's logging configuration.
        self.logger.debug('--- meta: %s', response.meta)

        for quote in response.xpath('//div[@class="quote"]'):
            loader = QuotesItemLoader(selector=quote, response=response)
            loader.add_xpath('text', './/span[@class="text"]/text()')
            loader.add_xpath('author', './/small[@class="author"]/text()')
            loader.add_xpath('tags', './/meta[@class="keywords"]/@content')
            yield loader.load_item()