# -*- coding: utf-8 -*-
import scrapy
from scrapytest.items import QuotesItem


class QuotesSpiderSpider(scrapy.Spider):
    """Spider that scrapes quotes from quotes.toscrape.com.

    For each quote on a listing page it yields a ``QuotesItem`` with the
    quote text, author name and a comma-separated tag string, then follows
    the "Next" pagination link back into :meth:`parse` until no next page
    remains.
    """
    name = 'quotes'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com']

    def parse(self, response):
        """Parse one listing page.

        :param response: the downloaded page (scrapy Response)
        :yields: ``QuotesItem`` per quote, then a ``scrapy.Request`` for
                 the next page when one exists.
        """
        for quote in response.xpath("//div[@class='quote']"):
            item = QuotesItem()
            item['text'] = quote.xpath("span[@class='text']/text()").extract_first()
            item['author'] = quote.xpath("span/small[@class='author']/text()").extract_first()
            tag_list = quote.xpath("div[@class='tags']/a/text()").extract()
            # BUG FIX: the old manual loop compared each tag's *value*
            # against tag_list[-1], so a duplicate tag value earlier in the
            # list lost its ", " separator; str.join is correct and O(n).
            item['tags'] = ", ".join(tag_list)
            # Use the spider's logger (lazy %-args) instead of print().
            self.logger.debug("text=%s, author=%s, tags=%s",
                              item['text'], item['author'], item['tags'])
            yield item

        # @href extracts the href attribute of the "Next" pagination link.
        next_page = response.xpath("//li[@class='next']/a/@href").extract_first()
        if next_page is not None:
            # urljoin resolves relative OR absolute hrefs against the page's
            # own URL — more robust than concatenating onto start_urls[0].
            url = response.urljoin(next_page)
            self.logger.debug("following pagination url=%s", url)
            # Pass the bound method itself (no parentheses) as the callback.
            yield scrapy.Request(url, callback=self.parse)

