from email import header
from time import sleep
from typing import final
import scrapy
import json
from scrapy.selector import Selector
from scrapy.shell import inspect_response
import isbnlib
import math
from books.utils import extract_chinese, outputInfo
import requests
import cn2an

from dotenv import dotenv_values

# Load environment variables (e.g. PROXY_URL) from the project-root .env file.
envs = dotenv_values('../.env')

# Search-result list (mobile search API)


def getURL(key, page, pageSize=100):
    """Build the JD mobile search API URL for *key* at page *page* (pageSize items)."""
    endpoint = 'https://wqsou.jd.com/search/searchn'
    query = f'key=图书 {key}&datatype=1&page={page}&pagesize={pageSize}&sceneval=2'
    return endpoint + '?' + query

# Detail page as rendered in the mobile client

def getMobileDetailURL(id):
    """Return the mobile-client product detail page URL for product *id*."""
    return 'https://in.m.jd.com/product/graphext/{}.html'.format(id)


def getCommentURL(id):
    """Return the JSONP comment-list URL for product *id* (first page, one entry)."""
    prefix = ('https://wq.jd.com/commodity/comment/getcommentlist'
              '?callback=skuJDEvalA&version=v2&pagesize=1&sceneval=2&score=0&sku=')
    suffix = '&sorttype=5&page=1&t=0.3775382603153272'
    return f'{prefix}{id}{suffix}'


def getCommentCountURL(bookList):
    """Return the comment-summary JSONP URL covering every book in *bookList*.

    Each element of *bookList* must be a mapping with a 'wareid' key.
    """
    ids = ','.join(book['wareid'] for book in bookList)
    return ('https://club.jd.com/comment/productCommentSummaries.action'
            f'?referenceIds={ids}&callback=jQuery753662&_=1643031275742')


# Proxy-pool endpoint; get_ip() expects JSON like {code, data: [{ip, port}, ...]}.
proxyURL = envs['PROXY_URL']


# Headers for search-list requests (mobile browser UA, referer wq.jd.com)
headers = {
    'authority': 'wqsou.jd.com',
    'pragma': 'no-cache',
    'cache-control': 'no-cache',
    'sec-ch-ua': '"Microsoft Edge";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
    'sec-ch-ua-mobile': '?1',
    'user-agent': 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Mobile Safari/537.36 Edg/95.0.1020.44',
    'sec-ch-ua-platform': '"Android"',
    'accept': '*/*',
    'sec-fetch-site': 'same-site',
    'sec-fetch-mode': 'no-cors',
    'sec-fetch-dest': 'script',
    'referer': 'https://wq.jd.com/',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,ru;q=0.5'
}

# Headers for single-product comment-list requests (referer item.m.jd.com)
commentHeaders = {
    'authority': 'wq.jd.com',
    'pragma': 'no-cache',
    'cache-control': 'no-cache',
    'sec-ch-ua': '"Microsoft Edge";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
    'sec-ch-ua-mobile': '?1',
    'user-agent': 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Mobile Safari/537.36 Edg/95.0.1020.44',
    'sec-ch-ua-platform': '"Android"',
    'accept': '*/*',
    'sec-fetch-site': 'same-site',
    'sec-fetch-mode': 'no-cors',
    'sec-fetch-dest': 'script',
    'referer': 'https://item.m.jd.com/',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,ru;q=0.5'
}

# Headers for batched comment-summary requests (desktop UA, referer search.jd.com)
commentCountHeaders = {
    'Connection': 'keep-alive',
    'sec-ch-ua': '" Not;A Brand";v="99", "Microsoft Edge";v="97", "Chromium";v="97"',
    'sec-ch-ua-mobile': '?0',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36 Edg/97.0.1072.62',
    'sec-ch-ua-platform': '"Windows"',
    'Accept': '*/*',
    'Sec-Fetch-Site': 'same-site',
    'Sec-Fetch-Mode': 'no-cors',
    'Sec-Fetch-Dest': 'script',
    'Referer': 'https://search.jd.com/',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
}


class BooksSpider(scrapy.Spider):
    """Crawl JD (jd.com) book search results for the keyword ``self.key``.

    Request pipeline:
      1. ``parseBookList`` pages through the mobile search API;
      2. ``parseCommentCount`` attaches comment statistics per book;
      3. ``parseMobileDetail`` scrapes the mobile detail page and yields
         one record per book that carries a parseable ISBN.

    Progress and failures are reported through ``outputInfo``.
    """

    name = 'books'

    # Defaults so the spider still runs when no -a arguments are supplied.
    jobID = 1
    key = '网络'
    maxCount = 0

    # Number of records saved so far.
    count = 0
    # Whether the estimated result total has been emitted already.
    outputTotal = False

    # Current proxy as 'http://ip:port'; None until get_ip() succeeds.
    proxy = None

    def get_ip(self):
        """Fetch a fresh proxy from the proxy service and store it on ``self.proxy``.

        On any failure the previous proxy is kept and a 'fail' message is
        emitted (deliberately best-effort).
        """
        response = requests.request("GET", proxyURL)
        try:
            responseJson = json.loads(response.text)
            assert int(responseJson['code']) == 0
            self.proxy = 'http://'+responseJson['data'][0]['ip'] + \
                ':'+str(responseJson['data'][0]['port'])
            outputInfo('proxy', self.proxy, self)
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C still interrupts.
            outputInfo('fail', 'parse proxy ip error', self)
            return

    def start_requests(self):
        """Scrapy entry point: grab a proxy, then request page 1 of the search."""
        self.get_ip()

        yield scrapy.Request(
            # Start from the first page.
            url=getURL(self.key, 1),
            callback=self.parseBookList,
            headers=headers,
            meta={
                "page": 1,
                "proxy": self.proxy
            }
        )

    # Parse one search-result page
    def parseBookList(self, response):
        """Parse a search page, schedule the comment-count request and the next page.

        On a garbled/blocked response the same page is retried with a fresh
        proxy; crawling stops when JD reports retcode 100024 (no more data),
        when a page comes back empty, or once ``maxCount`` is reached.
        """
        responseJson = None
        try:
            # Strip the JSONP wrapper before decoding.
            responseJson = json.loads(
                response.text[9:-1]
            )
            assert responseJson['retcode'] == '0'
        except Exception:
            # retcode 100024 means the result set is exhausted: finish cleanly.
            if responseJson and responseJson['retcode'] == '100024':
                outputInfo('success', 'complete', self)
                return

            # Otherwise rotate the proxy and retry the same page.
            self.get_ip()
            page = response.meta['page']
            yield scrapy.Request(
                url=getURL(self.key, page),
                callback=self.parseBookList,
                headers=headers,
                meta={
                    "page": page,
                    "proxy": self.proxy
                },
                dont_filter=True
            )
            return

        # Report the estimated total exactly once.
        if not self.outputTotal:
            self.outputTotal = True
            outputInfo('totalCount', int(
                responseJson['data']['searchm']['Head']['Summary']['ResultCount']), self)

        bookList = responseJson['data']['searchm']['Paragraph']

        if not len(bookList):
            # No more data: finish.
            outputInfo('success', 'complete', self)
            return

        # Keep only book-category items (cid1 == "1713") and report how many
        # non-book items were dropped from this page.
        originalLen = len(bookList)
        bookList = [i for i in bookList if i["cid1"] == "1713"]
        outputInfo('pageCount', {
            "page": response.meta['page'],
            "count": originalLen,
            "notBook": originalLen-len(bookList)
        }, self)

        yield scrapy.Request(
            url=getCommentCountURL(bookList),
            callback=self.parseCommentCount,
            headers=commentCountHeaders,
            meta={
                "bookList": bookList,
            }
        )

        # If a maximum count is configured, stop once enough pages were read
        # (search pages hold 100 items each — see getURL's default pageSize).
        if int(self.maxCount) and response.meta['page'] >= math.ceil(int(self.maxCount)/100):
            outputInfo('success', 'max page', self)
            return

        # Otherwise crawl the next page.
        yield scrapy.Request(
            url=getURL(self.key, response.meta['page']+1),
            callback=self.parseBookList,
            headers=headers,
            meta={
                "page": response.meta['page']+1,
                "proxy": self.proxy
            }
        )

    def parseCommentCount(self, response):
        """Attach comment stats to each book, then request its mobile detail page."""
        responseJson = None
        try:
            # Strip the "jQuery753662(...)" JSONP wrapper before decoding.
            responseJson = json.loads(
                response.text[13:-2]
            )
        except Exception:
            # BUG FIX: this branch previously read ``request.meta`` — an
            # undefined name — so every retry raised NameError instead of
            # re-requesting with a fresh proxy.
            bookList = response.meta['bookList']
            self.get_ip()
            yield scrapy.Request(
                url=getCommentCountURL(bookList),
                callback=self.parseCommentCount,
                headers=commentCountHeaders,
                meta={
                    "bookList": bookList,
                    "proxy": self.proxy
                },
                dont_filter=True
            )
            return
        commentCountDict = {}
        for i in responseJson['CommentsCount']:
            commentCountDict[int(i['SkuId'])] = i
        for i in response.meta['bookList']:
            # Counts arrive as display strings such as '5万+'; normalize to a
            # plain number via cn2an, falling back to 0 on parse failure.
            show = commentCountDict[int(i['wareid'])]['CommentCountStr']
            if show.endswith('+'):  # endswith also avoids IndexError on ''
                show = show[:-1]
            try:
                show = cn2an.cn2an(show, 'smart')
            except Exception:
                outputInfo('fail', 'parse CommentCountStr error', self)
                show = 0
            yield scrapy.Request(
                url=getMobileDetailURL(i['wareid']),
                callback=self.parseMobileDetail,
                meta={
                    "record": {
                        'author': i['Content']['author'],
                        'publisher': i['Content']['publishers'],
                        'name': i['Content']['warename'],
                        'price': i['dredisprice'],
                        'cover': i['Content']['imageurl'],
                        'id': i['wareid'],
                        'CommentCount': show,
                        'AverageScore': commentCountDict[int(i['wareid'])]['GoodRateShow'],
                    }
                })

    # NOTE(review): appears unused within this file — possibly consumed by a
    # pipeline elsewhere; confirm before removing.
    ISBNMap = {}

    # Parse the detail page
    def parseMobileDetail(self, response):
        """Parse the mobile detail page and yield the final record.

        Records without a parseable ISBN are discarded (reported via
        'abandonISBN').
        """
        # Publication info from the classic spec table (two-cell rows).
        result = {}
        for line in response.selector.css('.newspecification-table-box').css('tr'):
            s = line.css('td').xpath('string(.)').getall()
            if len(s) == 2:
                result[s[0]] = s[1].strip()

        # Publication info from the new-style page: JSON embedded in a hidden
        # input's value attribute.
        if not len(result.keys()):
            try:
                specData = response.selector.css(
                    '#wareGuigNew').attrib['value']
                if specData:
                    for group in json.loads(specData)["propGroups"]:
                        for attr in group['atts']:
                            result[attr['attName']] = ','.join(attr['vals'])
            except Exception:
                # Best effort: page layout varies; missing spec data is fine.
                pass

        # Collect the introduction text, splitting it into titled sections;
        # the Chinese literals below are section headings as they appear on
        # the page and must not be altered.
        digest = ''
        tmpVal = ''
        tmpKey = ''
        arr = response.selector.css('#IntroductionModule>.part-box').xpath(
            '//img/@alt|//*[not(self::script or self::style)]/text()').getall()
        for line in [a.strip() for a in arr if a.strip()]:
            cleanLine = extract_chinese(line)
            if cleanLine in [
                '作者简介', '价格说明', '规格参数',  '目录',
                '内容简介', '主编推荐', '内容介绍', '作者介绍',
                '关联推荐', '编辑推荐', '基本信息', '媒体评论',
                '在线试读部分章节', '内容推荐', '前言', '在线读',
                '内容', '内容提要', '编辑推荐语'
            ]:
                # Heading line: flush the section collected so far.
                if tmpVal and tmpKey:
                    result[tmpKey] = tmpVal
                tmpVal = ''
                tmpKey = cleanLine
            else:
                tmpVal += '<p>'+line+'</p>'
        result[tmpKey] = tmpVal

        # Pick the digest from the first matching section, in priority order.
        for digestKey in [
            '内容简介', '内容介绍', '内容提要',
            '内容', '内容推荐', '编辑推荐',
            '主编推荐', '编辑推荐语'
        ]:
            if digestKey in result:
                digest = result[digestKey]
                break

        # Assemble the final record, filling gaps from the spec table.
        finalResult = response.meta["record"]
        finalResult['digest'] = digest
        if not finalResult['publisher']:
            finalResult['publisher'] = result['出版社'] if '出版社' in result else ''
        if not finalResult['author']:
            finalResult['author'] = result['著者'] if '著者' in result else ''
        if not finalResult['author']:
            finalResult['author'] = result['编者'] if '编者' in result else ''
        finalResult['translator'] = result['译者'] if '译者' in result else ''
        finalResult["publishDate"] = result['出版时间'] if '出版时间' in result else ''
        finalResult["ISBN"] = result['ISBN'] if 'ISBN' in result else ''

        # Discard records whose ISBN can't be normalized to EAN-13.
        try:
            finalResult["ISBN"] = isbnlib.Isbn(finalResult["ISBN"]).ean13
        except Exception:
            outputInfo('abandonISBN', finalResult['id'], self)
            return

        yield finalResult

    def closed(self, reason):
        """Scrapy lifecycle hook: report why the spider stopped."""
        outputInfo('closed', reason, self)
