#!/usr/bin/python
# -*- coding: UTF-8 -*-

import scrapy
from ShopTrackingSpider.items import MerchandiseItem

# 分页数据
pageList = []


class CategoriesSpider(scrapy.Spider):
    # 指定Pipeline
    # custom_settings = {
    #     'ITEM_PIPELINES': {
    #         'ShopTrackingSpider.pipelines.DhgateCategoriesListPipeline': 100,
    #     }
    # }
    # 定义爬虫名称
    name = 'CategoriesSpider'
    # 搜索关键字
    key_list = ['phone', ]
    domain = 'www.dhgate.com'
    base_url = 'https://' + domain
    search_url = '/wholesale/search.do?act=search&sus=&searchkey='
    # 爬虫域名
    allowed_domains = [domain]
    # 爬虫链接
    start_urls = []

    for key in key_list:
        # f'{}{}',字符串拼接
        start_urls.append(f'{base_url}{search_url}{key}')

    def parsePage(self, data):
        print('==========parsePage start==========')

        for item in data.xpath("./a"):
            print(item.xpath("@href").extract_first())
            pageList.append(item.xpath("@href").extract_first())
        print('==========parsePage end==========')

        return

    def parseItem(self, response):
        print('==========parseItem start==========')
        categorie = response.xpath('//div[@class="bread-crumbs"]/h1/span/text()').extract_first()
        for item in response.xpath('//div[@class="gwrap"]/div'):
            oneItem = MerchandiseItem()
            oneItem['icon'] = 'http:' + str(item.xpath('./div[@class="photo"]/a/@lazyload-src').extract_first())
            oneItem['name'] = item.xpath('./h3/a/text()').extract_first()
            oneItem['categorie'] = categorie
            oneItem['platform'] = 'dhgate'
            oneItem['price'] = item.xpath('./div/ul/li[@class="price"]/span/text()').extract_first()
            oneItem['costprice'] = item.xpath('./div/ul/li[@class="costprice"]/span/text()').extract_first()
            ordersData = str(item.xpath(
                './div/ul/li[@class="clearfix customereview"]/span[@class="ordernum"]/text()').extract_first()).split(
                ' ')
            oneItem['orders'] = ordersData[0]
            oneItem['review_num'] = item.xpath(
                './div/ul/li[@class="clearfix customereview"]/span[@class="reviewnum"]/text()').extract_first()
            oneItem['seller_name'] = item.xpath('./div[@class="bottom-box"]/div/div/a/text()').extract_first()
            oneItem['shop_url'] = item.xpath('./div[@class="bottom-box"]/div/div/a/@href').extract_first()
            # print(item.xpath('./div[@class="bottom-box"]/div/div/a"'))
            print(oneItem)
            print('#######################################')
        print('==========parseItem end==========')
        return

    def parse(self, response):
        print('==========parse start==========')
        self.parsePage(response.xpath('//div[@class="page"]/span'))
        self.parseItem(response)

        try:
            print(pageList[0])
            yield scrapy.Request(pageList[0], callback=self.parseItem)
        except:
            print('==========parse ERR==========')
        print('==========parse end==========')
        pass
