#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json

import scrapy

from ShopTrackingSpider.items import CategorieItem
from ShopTrackingSpider.pipelines import DhgateCategoriesListPipeline
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ...utils import json_util

# Module-level accumulator shared by all spider callbacks below.
# NOTE(review): module globals persist for the life of the process and are
# shared across spider runs; this list grows without bound. Consider making
# it an instance attribute (self.item_list) — verify nothing else imports it.
itemList = []


class CategoriesList(scrapy.Spider):
    """Crawl dhgate.com's "All Categories" page into CategorieItem records.

    Yields one item per parent category (``categorie == 'parent'``) and one
    per sub-category (``categorie`` set to the parent category's name), then
    follows each parent category page to collect its full sub-category list.
    """

    # Route this spider's items through its dedicated pipeline only.
    custom_settings = {
        'ITEM_PIPELINES': {
            'ShopTrackingSpider.pipelines.DhgateCategoriesListPipeline': 100,
        }
    }
    # Spider name used by `scrapy crawl CategoriesList`.
    name = 'CategoriesList'
    # Site root; kept for backward compatibility (URL joining now goes
    # through response.urljoin, which also handles absolute hrefs).
    base_url = 'https://www.dhgate.com'
    allowed_domains = ['www.dhgate.com']
    start_urls = [
        'https://www.dhgate.com/all-categories/index.html#pu1806-all',
    ]

    def parse(self, response):
        """Parse the all-categories landing page.

        Yields a CategorieItem for every parent category and every
        sub-category found, then schedules a request per parent category
        page, handled by :meth:`parseSub`.
        """
        self.logger.info('==========parse start==========')
        for category_sel in response.xpath('//div[@class="first"]/dl'):
            parentItem = CategorieItem()
            parentItem['categorie'] = 'parent'
            parentItem['name'] = category_sel.xpath('./dt/a/text()').extract_first()
            # response.urljoin handles relative, absolute and
            # protocol-relative hrefs (and a missing href) where naive
            # `base_url + href` concatenation would break or raise.
            parentItem['url'] = response.urljoin(
                category_sel.xpath('./dt/a/@href').extract_first())
            itemList.append(parentItem)
            for link in category_sel.xpath('.//dd/a'):
                subItem = CategorieItem()
                # Sub-categories record their parent category's name.
                subItem['categorie'] = category_sel.xpath(
                    './dt/a/text()').extract_first()
                subItem['name'] = link.xpath('text()').extract_first()
                subItem['url'] = response.urljoin(
                    link.xpath('@href').extract_first())
                itemList.append(subItem)
                yield subItem
            yield parentItem

        # Follow each parent category page. Guard per item so one bad URL
        # does not abort the remaining requests; the original bare `except:`
        # around the whole loop silently swallowed every error, including
        # KeyboardInterrupt/SystemExit.
        for oneItem in itemList:
            if oneItem['categorie'] != 'parent':
                continue
            try:
                yield scrapy.Request(oneItem['url'], callback=self.parseSub)
            except (ValueError, TypeError):
                # scrapy.Request raises ValueError on a malformed URL,
                # TypeError on a None URL.
                self.logger.warning('scrapy.Request failed for %r',
                                    oneItem.get('url'))

        self.printItemList()
        self.logger.info('==========parse end==========')

    def parseSub(self, response):
        """Parse one parent-category page, yielding a CategorieItem for
        every sub-category link it contains."""
        self.logger.info('==========parseSub start==========')
        for category_sel in response.xpath('//div[@class="b-catelist"]/dl'):
            # Group heading shared by all links in this <dl>.
            group_name = category_sel.xpath('./dt/dt/b/text()').extract_first()
            for link in category_sel.xpath('.//dd/a'):
                oneItem = CategorieItem()
                # extract_first() keeps these fields scalar strings,
                # consistent with parse(); the original used extract()
                # here, which stored one-element lists instead.
                oneItem['categorie'] = group_name
                oneItem['name'] = link.xpath('text()').extract_first()
                oneItem['url'] = response.urljoin(
                    link.xpath('@href').extract_first())
                itemList.append(oneItem)
                yield oneItem
        self.logger.info('==========parseSub end==========')

    def printItemList(self):
        """Debug helper: dump every item collected so far."""
        self.logger.info('%s', itemList)
