import json
import os
import re
import time

import scrapy
import requests

from ..items import DdReptileItem


class DdSpider(scrapy.Spider):
    """Spider that crawls e-book data from e.dangdang.com (for testing).

    Flow:
      1. ``parse`` receives the full category tree (``start_urls``), saves the
         raw JSON to disk, then probes every second-level category once to
         learn its total record count and schedules one paged listing request
         per result page.
      2. ``parse_second`` dumps each raw listing page into a per-category
         directory and yields one ``DdReptileItem`` per book.
    """
    name = 'dd'
    allowed_domains = ['e.dangdang.com']
    # Entry point: returns the whole category tree as JSON.
    start_urls = ['https://e.dangdang.com/media/api2.go?action=mediaCategory&channelType=ddds&start=0&end=5&level=6&channelId=10020&deviceSerialNo=html5&macAddr=html5&channelType=html5&permanentId=20211026081107328153407683428350670&returnType=json&channelId=70000&clientVersionNo=6.8.0&platformSource=DDDS-P&fromPlatform=106&deviceType=pconline&token=']
    # First probe of a category; '%s' is the category code. Used only to read data.total.
    request_url = 'https://e.dangdang.com/media/api.go?action=mediaCategoryLeaf&promotionType=1&deviceSerialNo=html5&macAddr=html5&channelType=html5&permanentId=20211026081107328153407683428350670&returnType=json&channelId=70000&clientVersionNo=6.8.0&platformSource=DDDS-P&fromPlatform=106&deviceType=pconline&token=&start=0&end=20&category=%s&dimension=dd_sale&order=0'
    # Paged listing; '%s' placeholders are (start index, end index, category code).
    request_url_page = 'https://e.dangdang.com/media/api.go?action=mediaCategoryLeaf&promotionType=1&deviceSerialNo=html5&macAddr=html5&channelType=html5&permanentId=20211026081107328153407683428350670&returnType=json&channelId=70000&clientVersionNo=6.8.0&platformSource=DDDS-P&fromPlatform=106&deviceType=pconline&token=&start=%s&end=%s&category=%s&dimension=dd_sale&order=0'

    def parse(self, response):
        """Parse the category tree and schedule one request per listing page.

        Only the ``category`` code and the ``start``/``end`` paging params
        differ between listing URLs. Sub-category data is already included in
        its parent category, so iterating first- and second-level categories
        is sufficient.
        """
        # Persist the raw category payload for offline inspection.
        with open(file='./dd_catagory.json', mode='w', encoding='utf-8') as fp:
            fp.write(response.text)
        print("=============================================================")
        print('====================   获取所有的分类结束   ====================')
        print("=============================================================")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.38'
        }

        # Parse the response we already hold instead of re-reading the file
        # just written (the original json.load(open(...)) also leaked the
        # file handle).
        category_first_obj = json.loads(response.text)
        category_code_list = category_first_obj['data']['catetoryList'][0]
        for cate1_obj in category_code_list['catetoryList']:
            first_name = cate1_obj.get('name')
            first_code = cate1_obj.get('code')
            for cate2_obj in cate1_obj.get('catetoryList'):
                second_name = cate2_obj.get('name')
                second_code = cate2_obj.get('code')
                print('------------------------ 当前遍历的是一级菜单 %s[%s] 下的 %s[%s] start. ------------------------' % (first_name, first_code, second_name, second_code))
                # Probe request: only used to read the category's total record
                # count so we can compute the number of pages.
                contents = requests.get(url=(self.request_url % second_code), headers=headers)
                data_total = json.loads(contents.text).get('data').get('total')
                data_total = int(data_total) if (data_total is not None) else 0
                page_size = 50
                # Ceiling division. The original computed pages with float
                # division and then iterated range(page_num - 1), which dropped
                # the last page and issued NO request at all for single-page
                # categories.
                page_num = (data_total + page_size - 1) // page_size
                print('当前分类[%s]下共有数据[%s]条,一共分页[%s]' % (second_name, data_total, page_num))
                for i in range(page_num):
                    # BUG FIX: the original used start_page = i*50+i, advancing
                    # 51 indices per 50-item page and drifting off the grid.
                    start_page = i * page_size
                    end_page = start_page + page_size
                    request = scrapy.Request(
                        url=(self.request_url_page % (start_page, end_page, second_code)),
                        callback=self.parse_second)
                    # Carry context to parse_second for directory/file naming.
                    request.meta['second_name'] = second_name
                    request.meta['start_page'] = start_page
                    request.meta['end_page'] = end_page
                    yield request
                print('------------------------ 当前遍历的是一级菜单 %s[%s] 下的 %s[%s] end. ------------------------' % (first_name, first_code, second_name, second_code))

    def parse_second(self, response):
        """Parse one listing page: archive the raw payload, yield book items."""
        data = json.loads(response.text).get('data')
        # Strip slashes/backslashes so the category name is a safe directory name.
        dirname = re.sub(r'[\\/]', '', response.meta['second_name'])
        # exist_ok avoids the race-prone isdir()-then-mkdir() pattern.
        os.makedirs(dirname, exist_ok=True)
        filename = '%s_%s-%s.json' % (dirname, response.meta['start_page'], response.meta['end_page'])
        with open(file=dirname + '/' + filename, mode='a', encoding='utf-8') as fp:
            fp.write(str(data))
        sale_list = data.get('saleList')
        if sale_list is None:
            return

        for sale_entity in sale_list:
            # Hoist the repeated sale_entity['mediaList'][0] lookup.
            media = sale_entity['mediaList'][0]
            bookName = media.get('title', '')
            authorName = media.get('authorPenname', '')
            categoryId = media.get('categoryIds', '')
            categoryName = media.get('categorys', '')
            commentNumber = media.get('commentNumber', 0)
            # BUG FIX: the original guard was str('descs' in media), and both
            # "True" and "False" are truthy strings, so a missing 'descs' key
            # raised KeyError instead of falling back to ''.
            desc = str(media['descs']) if 'descs' in media else ''
            mediaId = media.get('mediaId', '')
            originalPrice = media.get('originalPrice', 0.0)
            # paperBookPrice is not reliably present in the payload; a fixed
            # 0.0 placeholder is passed to the item below, as before.
            price = media.get('price', 0.0)
            promotionPrice = media.get('promotionPrice', 0.0)
            salePrice = media.get('salePrice', 0.0)
            vipPrice = media.get('vipPrice', 0.0)
            print('开始准备下载的信息是：', bookName, authorName, categoryName, categoryId, commentNumber, desc, mediaId, originalPrice, 0.0, price, promotionPrice, salePrice, vipPrice)
            item = DdReptileItem(bookName=bookName, authorName=authorName, categoryName=categoryName, categoryId=categoryId, commentNumber=commentNumber, desc=desc,
                                 mediaId=mediaId, originalPrice=originalPrice, paperBookPrice=0.0, price=price, promotionPrice=promotionPrice, salePrice=salePrice, vipPrice=vipPrice)
            yield item