import scrapy
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http import Request
import re
from ai_hui_shou.items import AiHuiShouItem


class AiHuiShou(scrapy.Spider):
    """Spider for aihuishou.com: crawls phone brands, then each brand's product pages.

    Flow: ``parse`` extracts every brand on the category page and follows its
    listing URL; ``goods_detail`` extracts the products on a listing page and
    paginates while pages are full.
    """

    name = 'ai_hui_shou'
    start_urls = ['https://www.aihuishou.com/shouji/b52']

    BASE_URL = 'https://www.aihuishou.com'
    # The site lists up to 24 products per page; a full page implies that a
    # following page may exist (pagination heuristic used in goods_detail).
    PAGE_SIZE = 24

    def parse(self, response):
        """Extract each phone brand and follow its listing page.

        Yields one ``scrapy.Request`` per brand; the partially filled item
        (brand name + listing URL) travels in ``meta['item']``.
        """
        brand_list = response.xpath("//*[ @id = 'body']/div[3]/dl[2]/div[1]/div/dd")
        for brand in brand_list:
            brand_url = brand.xpath('./a/@href').extract_first()
            if not brand_url:
                # Skip malformed entries instead of crashing on None concatenation.
                continue
            item = AiHuiShouItem()
            item['brand'] = brand.xpath('./a/text()').extract_first()
            item['class_href'] = self.BASE_URL + brand_url
            yield scrapy.Request(
                item['class_href'],
                callback=self.goods_detail,
                meta={'item': item},
            )

    def goods_detail(self, response):
        """Extract every product on a brand listing page; paginate while full.

        Yields one populated ``AiHuiShouItem`` per product, then a Request for
        the next page when the current page holds ``PAGE_SIZE`` products.
        """
        base_item = response.meta['item']
        products = response.xpath("//*[contains(@class , 'product-list-wrapper')]/ul/li/a")
        page_num = response.xpath("//*[contains(@class,'page active')]/text()").extract_first()
        for product in products:
            # BUG FIX: copy per product — the original mutated and re-yielded a
            # single shared item, so consumers holding references saw only the
            # last product's values.
            item = base_item.copy()
            # BUG FIX: relative XPaths ('.//') — the original '//' selected from
            # the document root, giving every product the page's first img/price.
            item['img'] = product.xpath(".//*[contains(@class ,'img-box')]/img/@src").extract_first()
            item['price'] = product.xpath(".//*[contains(@class ,'price')]/em/text()").extract_first()
            item['name'] = product.xpath("./p/text()").extract_first()
            yield item
        # A full page suggests a next page exists. Guard page_num: extract_first()
        # returns None when the active-page node is missing, and int(None) /
        # int('garbage') would raise.
        if len(products) >= self.PAGE_SIZE and page_num and page_num.strip().isdigit():
            next_url = "%s-p%s" % (base_item['class_href'], int(page_num) + 1)
            yield scrapy.Request(
                next_url,
                callback=self.goods_detail,
                meta={'item': base_item},
            )
