#!/usr/bin/python
# -*- coding: UTF-8 -*-

import os

import scrapy
from scrapy import cmdline
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

from ShopTrackingSpider.items import DhgateShopItem, DhgateShopPage
from ...utils import json_util
from ...common import CommonUtil

# Pagination links collected by the spider (module-level shared state)
pageList = []


class DhgateShop:
    """Entry point that launches the DhgateShop spider and prints the
    pagination links collected into the module-level ``pageList``."""

    def start(self):
        """Run the crawl via the Scrapy command line, then print results.

        NOTE(review): ``cmdline.execute`` hands control to Scrapy, which
        calls ``sys.exit()`` when the crawl finishes, so the statements
        after it are effectively unreachable in the current design —
        confirm whether the summary loop is ever expected to run.
        """
        # Typo fix: banner said "DhagteShop".
        print("==========DhgateShop start==========")
        cmdline.execute(['scrapy', 'crawl', 'DhgateShop'])
        # BUG FIX: parsePage appends plain href strings to pageList, so
        # the original ``page['name']`` would raise TypeError (string
        # indices must be integers). Print the entry itself instead.
        for page in pageList:
            print(page)
        print("==========DhgateShop end==========")


# Crawl product data by search category


class ShopSpider(scrapy.Spider):
    """Spider that searches dhgate.com for each keyword in ``key_list``
    and scrapes product listings plus pagination links.

    Scraped items are routed to the project pipeline declared in
    ``custom_settings``; pagination hrefs are accumulated in the
    module-level ``pageList``.
    """

    # Route items produced by this spider to the project pipeline.
    custom_settings = {
        'ITEM_PIPELINES': {
            'ShopTrackingSpider.pipelines.DhgateCategorieList': 100,
        }
    }
    # Spider name used by `scrapy crawl DhgateShop`.
    name = 'DhgateShop'
    # Search keywords, one start URL is built per keyword.
    key_list = ['phone', ]
    domain = 'www.dhgate.com'
    base_url = 'https://' + domain
    search_url = '/wholesale/search.do?act=search&sus=&searchkey='
    # Restrict the crawl to the dhgate domain.
    allowed_domains = [domain]
    # Seed URLs for the crawl (filled by the loop below).
    start_urls = []

    # NOTE: this must stay a plain class-body loop — a comprehension here
    # could not see the sibling class attributes base_url/search_url.
    for key in key_list:
        start_urls.append(f'{base_url}{search_url}{key}')

    def parsePage(self, data):
        """Collect pagination hrefs from the page-number selector into
        the module-level ``pageList``.

        :param data: selector over ``//div[@class="page"]/span``.
        """
        print('==========parsePage start==========')
        for item in data.xpath("./a"):
            href = item.xpath("@href").extract_first()
            print(href)
            # Guard: extract_first() returns None for <a> tags with no href.
            if href:
                pageList.append(href)
        print('==========parsePage end==========')

    def parseItem(self, response):
        """Yield one dict per product listing found on *response*.

        :param response: a search-results page.
        :returns: generator of ``{'name', 'price', 'costprice'}`` dicts.
        """
        print('==========parseItem start==========')
        for item in response.xpath('//div[@class="gwrap"]/div'):
            # Product name / current price / original (cost) price.
            name = item.xpath('./h3/a/text()').extract()
            price = item.xpath('./div/ul/li[@class="price"]/span/text()').extract()
            costprice = item.xpath('./div/ul/li[@class="costprice"]/span/text()').extract()
            print(name)
            print(price)
            print(costprice)
            # BUG FIX: the original bare ``yield`` emitted None items to the
            # pipeline; yield the scraped fields instead.
            yield {'name': name, 'price': price, 'costprice': costprice}
        print('==========parseItem end==========')

    def parse(self, response):
        """Default callback for start_urls: scrape listings on the first
        results page, collect pagination links, and follow the first one."""
        print('==========parse start==========')
        self.parsePage(response.xpath('//div[@class="page"]/span'))
        # BUG FIX: the original called parseItem() without consuming the
        # generator, so no items were ever produced from the first page.
        yield from self.parseItem(response)

        # Explicit guard replaces the original bare ``except:`` that
        # silently swallowed every error (including real scraping bugs).
        if pageList:
            print(pageList[0])
            # urljoin handles relative pagination hrefs; absolute URLs
            # pass through unchanged.
            yield scrapy.Request(response.urljoin(pageList[0]),
                                 callback=self.parseItem)
        else:
            print('==========parse end==========')
