import scrapy
import math
from books.utils import outputInfo
import isbnlib


def getURL(key, page):
    """Build the Dangdang mobile AJAX search URL for *key* at result page *page*."""
    base = 'http://search.m.dangdang.com/search_ajax.php'
    query = (
        'action=all_search'
        f'&keyword={key}'
        '&act=get_product_flow_search'
        f'&page={page}'
        '&cid=01.00.00.00.00.00'
    )
    return f'{base}?{query}'


def getSpecificationURL(id):
    """Return the Dangdang mobile product-detail page URL for product *id*."""
    return 'http://product.m.dangdang.com/detail{}-1-1.html'.format(id)


def getJikeURL(ISBN):
    """Return the jike.xyz book-metadata API endpoint for *ISBN*."""
    return 'https://api.jike.xyz/situ/book/isbn/%s' % ISBN


# Request headers imitating a mobile Chrome browser; sent with every
# search request so Dangdang serves the mobile AJAX/JSON endpoint.
headers = {
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Cache-Control': 'no-cache',
    'Accept': 'application/json',
    'User-Agent': 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Mobile Safari/537.36 Edg/95.0.1020.53',
    'X-Requested-With': 'XMLHttpRequest',
    'Referer': 'http://search.m.dangdang.com/search.php',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,ru;q=0.5',
}


class BooksSpider(scrapy.Spider):
    """Crawl Dangdang's mobile search results for ``key``.

    For every product on a search-result page a detail-page request is
    scheduled; the detail page supplies publisher/author fallbacks, the
    publish date, the ISBN and a digest.  Records without a valid ISBN
    are dropped.  Progress is reported through ``outputInfo``.
    """

    name = 'books'

    # Defaults so the spider still runs when no -a arguments are passed.
    jobID = 1
    key = '网络'
    maxCount = 0

    # Number of records saved so far (read by outputInfo via ``self``).
    count = 0
    # Whether the estimated total has already been reported once.
    outputTotal = False

    def start_requests(self):
        """Kick off the crawl from the first search-result page."""
        yield scrapy.Request(
            # Start from page 1.
            url=getURL(self.key, 1),
            callback=self.parseBookList,
            headers=headers,
            meta={
                "page": 1
            }
        )

    # Parse one AJAX search-result page.
    def parseBookList(self, response):
        """Schedule one detail request per product on this page, report
        progress, then follow up with the next page until exhausted or
        ``maxCount`` is reached."""
        responseJson = response.json()
        if responseJson['errorCode'] != 0:
            outputInfo('parseError', responseJson["errorCode"], self)
            return

        # Report the estimated total exactly once (pagecount * 10 items/page).
        if not self.outputTotal:
            self.outputTotal = True
            outputInfo('totalCount', int(
                responseJson['page']['pagecount'])*10, self)

        bookList = responseJson['products']

        for i in bookList:
            yield scrapy.Request(
                url=getSpecificationURL(i['id']),
                callback=self.parseSpecification,
                meta={
                    "record": {'author': i['authorname'],
                               'publisher': i['publisher'],
                               'name': i['name'],
                               'price': i['price'],
                               'cover': i['image_url'],
                               'id': i['id'],
                               'publishDate': i['publish_date'],
                               'CommentCount': i['total_review_count'],
                               'AverageScore': i['score'],
                               }
                })

        # FIX: report the actual number of products on this page — the
        # last page can carry fewer than 10, so the hard-coded 10
        # over-reported progress.
        outputInfo('pageCount', {
            "page": response.meta['page'],
            "count": len(bookList),
            "notBook": 0
        }, self)

        # If a maximum record count was requested, stop once enough pages
        # (at ~10 products each) have been crawled.
        if int(self.maxCount) and response.meta['page'] >= math.ceil(int(self.maxCount)/10):
            outputInfo('success', 'max page', self)
            return
        # Stop when there is no next page.
        if int(responseJson['page']['pagecount']) <= response.meta['page']:
            outputInfo('success', 'complete', self)
            return
        # Otherwise crawl the next page.
        yield scrapy.Request(
            url=getURL(self.key, response.meta['page']+1),
            callback=self.parseBookList,
            headers=headers,
            meta={
                "page": response.meta['page']+1
            }
        )

    def parseSpecification(self, response):
        """Parse a product detail page: merge publication metadata into the
        record carried in ``response.meta['record']``, validate the ISBN,
        extract a digest, and yield the finished record."""

        # Collect the publication-info key/value pairs (e.g. 出版社, 作者,
        # 出版时间, ISBN) from the spec list on the detail page.
        result = {}
        for i in response.selector.css('.parms li'):
            result[i.css('em').xpath('string(.)').get()] = i.css(
                'i').xpath('string(.)').get()

        finalResult = response.meta["record"]
        # Fall back to the detail page when the search listing lacked
        # publisher/author.
        if not finalResult['publisher']:
            finalResult['publisher'] = result['出版社'] if '出版社' in result else ''
        if not finalResult['author']:
            finalResult['author'] = result['作者'] if '作者' in result else ''
        finalResult["publishDate"] = result['出版时间'] if '出版时间' in result else ''
        finalResult["ISBN"] = result['ISBN'] if 'ISBN' in result else ''

        # Drop records without a usable ISBN: any failure to normalize to
        # EAN-13 abandons the record.
        try:
            # NOTE(review): current isbnlib releases expose functions such
            # as ean13()/canonical() rather than an ``Isbn`` class —
            # confirm this call against the pinned isbnlib version.
            finalResult["ISBN"] = isbnlib.Isbn(finalResult["ISBN"]).ean13
        except Exception:
            # FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only data errors should be
            # treated as "no valid ISBN".
            outputInfo('abandonISBN', finalResult['id'], self)
            return

        # Extract the digest: the 图书详情 section alternates
        # 【section-title】 lines with body text; collect each section's
        # paragraphs under its title.
        tmpDic = {}
        tmpKey = ''
        tmpVal = ''

        for i in response.selector.xpath('descendant-or-self::section[@data-content-name = "图书详情"]//text()'):
            if not i.get().strip():
                continue
            if i.get().strip()[0] == '【':
                # A new section starts; flush the previous one first.
                if tmpVal and tmpKey:
                    tmpDic[tmpKey] = tmpVal
                    tmpKey = ''
                    tmpVal = ''
                tmpKey = i.get().strip()[1:-1]
            else:
                tmpVal += f'<p>{i.get().strip()}</p>'

        # Flush the trailing section.
        if tmpVal and tmpKey:
            tmpDic[tmpKey] = tmpVal

        # Pick the digest by preference: 内容简介 > 简介 > 内容 > 编辑推荐.
        finalResult['digest'] = ''
        if '内容简介' in tmpDic:
            finalResult['digest'] = tmpDic['内容简介']
        elif '简介' in tmpDic:
            finalResult['digest'] = tmpDic['简介']
        elif '内容' in tmpDic:
            finalResult['digest'] = tmpDic['内容']
        elif '编辑推荐' in tmpDic:
            finalResult['digest'] = tmpDic['编辑推荐']

        yield finalResult

    def closed(self, reason):
        """Scrapy shutdown hook: report why the spider closed."""
        outputInfo('closed', reason, self)
