import scrapy
from scrapy import cmdline
from lxml import etree
from icecream import ic
from amazon import items

class F1Spider(scrapy.Spider):
    """Crawl Amazon Kindle bestseller categories and their paginated detail lists.

    Flow: ``start_requests`` seeds the bestsellers landing page; ``parse``
    yields one AmazonItem per category link and then schedules one detail
    request per category row stored in MySQL; ``parse_detail`` extracts book
    entries and follows the "next page" link.
    """

    name = 'f1'
    # allowed_domains = ['www.amazon.cn']
    # start_urls = ['http://www.amazon.cn/']

    # XPath rules applied relative to each <li> node on a detail page.
    rule = {
        'title': './span/div/span/a/div/text()',
        'author': './span/div/span/div/span[@class="a-size-small a-color-base"]/text()',
        'star': './span/div/span/div[@class="a-icon-row a-spacing-none"]/a[1]/@title',
        'price': './span/div/span/div/a/span/span[@class="p13n-sc-price"]/text()'
    }

    def start_requests(self):
        """Seed the crawl with the Kindle bestsellers landing page."""
        url = 'https://www.amazon.cn/gp/bestsellers/digital-text/116169071/ref=sv_kinc_4'
        yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        """Yield one AmazonItem per category link, then schedule detail pages.

        Fixes vs. the original:
        - a fresh AmazonItem is created for every <li>; the original reused
          one mutable item across iterations, so previously-yielded items
          could be overwritten with the last row's data.
        - the MySQL query / detail-request scheduling runs once per page,
          not once per category <li> as before.
        """
        html = etree.HTML(response.text)
        tags = html.xpath('//div[@class="a-fixed-left-grid-col a-col-left"]/ul/ul/ul/ul/li')
        for tag in tags:
            try:
                item = items.AmazonItem()
                # lxml xpath returns lists; pipelines downstream apparently
                # expect that shape, so it is kept as-is.
                item['url'] = tag.xpath('./a/@href')
                item['name'] = tag.xpath('./a/text()')
                yield item
            except Exception as e:
                ic(e)
                continue

        # Schedule one detail request per category stored in MySQL.
        # NOTE(review): self.cursor is never assigned anywhere in this
        # spider — presumably attached by a pipeline/extension at runtime;
        # confirm, otherwise this raises AttributeError (which the original
        # silently swallowed once per <li>).
        try:
            self.cursor.execute('select id,name,url from caty')
            for c_id, _name, c_url in self.cursor.fetchall():
                yield scrapy.Request(url=c_url, callback=self.parse_detail,
                                     meta={'id': c_id})
        except Exception as e:
            ic(e)

    def parse_detail(self, response):
        """Extract DetailItems from one bestseller page and follow pagination."""
        for li in response.xpath('//div[@id ="zg-center-div"]/ol/li'):
            item = items.DetailItem()
            item['title'] = li.xpath(self.rule['title']).extract_first()
            item['author'] = li.xpath(self.rule['author']).extract_first()
            # field is named 'start' in DetailItem even though it holds the
            # star rating — kept to match the item definition.
            item['start'] = li.xpath(self.rule['star']).extract_first()
            item['price'] = li.xpath(self.rule['price']).extract_first()
            item['c_id'] = response.meta['id']
            yield item

        next_href = response.xpath(
            '//div[@class="a-text-center"]/ul/li[@class="a-last"]/a/@href'
        ).extract_first()
        if next_href:
            # urljoin: pagination hrefs are commonly relative; the original
            # passed them verbatim, which fails for a relative URL.
            yield scrapy.Request(url=response.urljoin(next_href),
                                 callback=self.parse_detail,
                                 meta={'id': response.meta['id']})

if __name__ == '__main__':
    # Convenience entry point: run this spider directly via `python <file>`.
    cmdline.execute(['scrapy', 'crawl', 'f1'])