import scrapy
import pprint
import re
from copy import deepcopy       # 导入深拷贝
from ..items import SuningbookItem


class SuningSpider(scrapy.Spider):
    """Spider for book.suning.com.

    Walks the portal's category menu into a nested structure
    (main title -> vice titles -> child titles), then issues one AJAX
    product-list request per child category whose URL carries a numeric
    list id.
    """
    name = 'suning'
    allowed_domains = ['suning.com']
    start_urls = ['http://book.suning.com/']

    def parse(self, response, **kwargs):
        """Parse the category menu and yield the first product-list request
        (page 0) for every child category with an extractable list id.

        :param response: the book portal front page.
        :yields: ``scrapy.Request`` objects handled by
            :meth:`parse_book_list_init`.
        """
        item_list = []  # nested hierarchy: main -> vice -> child titles
        # 1. Main titles (literature & art, children, humanities, ...).
        #    dt[not(@class)] filters out the special "imported books" and
        #    "magazines" entries, which have no vice titles.
        main_title_list = response.xpath(
            "//div[@class='menu-list']/div/dl/dt[not(@class)]/h3/a")
        for menu_pos, main_a in enumerate(main_title_list):
            vice_titles = []
            item_list.append({
                "main_title": main_a.xpath("./text()").extract_first(),
                "main_title_url": main_a.xpath("./@href").extract_first(),
                "vice_titles": vice_titles,
            })
            # 2. Vice titles (novels, youth literature, art, ...). They live
            #    in the menu-sub block at the same position as this main
            #    title; XPath positions are 1-based, hence menu_pos + 1.
            vice_title_list = response.xpath(
                f"//div[@class='menu-sub'][{menu_pos + 1}]"
                "/div[@class='submenu-left']/p[@class='submenu-item']/a")
            for vice_a in vice_title_list:
                child_titles = []
                vice_titles.append({
                    "vice_title": vice_a.xpath("./text()").extract_first(),
                    "vice_title_url": vice_a.xpath("./@href").extract_first(),
                    "child_titles": child_titles,
                })
                # 3. Child titles: the <ul> immediately following this <p>.
                child_anchors = vice_a.xpath(
                    "./parent::p/following-sibling::ul[1]/li/a")
                for child_a in child_anchors:
                    child_titles.append({
                        "child_title":
                            child_a.xpath("./text()").extract_first(),
                        "child_title_url":
                            child_a.xpath("./@href").extract_first(),
                    })

        self.logger.debug("category tree:\n%s", pprint.pformat(item_list))
        # Extract the numeric list id from URLs shaped like
        # https://list.suning.com/1-502687-0.html
        list_id_pat = re.compile(r".*1-(\d+)-.*")
        # Do not fetch the child URL directly -- extract its id and query
        # the AJAX product-list endpoint instead.
        for main_entry in item_list:
            for vice_entry in main_entry["vice_titles"]:
                for child_entry in vice_entry["child_titles"]:
                    final_url = child_entry.get("child_title_url")
                    if not final_url:
                        # Anchor without an href: nothing to request.
                        continue
                    match = list_id_pat.search(final_url)
                    if match is None:
                        # URL is a keyword search such as
                        # https://search.suning.com/emall/bookSearch.do?keyword=CET-6
                        # TODO: add a dedicated callback for keyword pages.
                        continue
                    book_list_id = match.group(1)
                    # Request page 0 first purely to learn the total page
                    # count; pagination happens in the callback.
                    url = ("https://list.suning.com/emall/showProductList.do"
                           f"?ci={book_list_id}&pg=03&cp=0&paging=0")
                    self.logger.debug("url is %s", url)
                    yield scrapy.Request(
                        url=url,
                        callback=self.parse_book_list_init,
                        # Callback enriches this with a page_num key.
                        meta={"item": {"url": final_url}},
                    )

    def parse_book_list_init(self, response):
        """Handle the first product-list page for a category.

        The total page count only appears in this AJAX response, so extract
        it, attach it to the item, and yield the item.

        :param response: page 0 of showProductList.do for one category.
        :yields: ``SuningbookItem`` carrying ``url`` and ``page_num``.
        """
        item = response.meta["item"]
        match = re.search(r'param\.pageNumbers = "(\d+)"', response.text)
        if match is None:
            # Page layout changed or the list is empty; log instead of
            # dying with AttributeError on .group().
            self.logger.warning("page count not found in %s", response.url)
            return
        item["page_num"] = int(match.group(1))  # total number of pages
        yield SuningbookItem(item=item)
        # Follow-up pagination: keep requesting while currentPage <= page_num.

    # TODO: parse_book_list -- walk each book link on the paginated
    # product-list pages.
