import datetime
import pymysql
import requests
import json, random
from lxml import etree
from redis import Redis


# noinspection DuplicatedCode,PyMethodMayBeStatic
class DouBanBook(object):
    """Crawler for read.douban.com category listings (kind=108).

    Workflow (see :meth:`run`): POST the paginated listing endpoint, de-duplicate
    book URLs through the Redis set ``db_urls``, scrape each book's detail page
    with lxml, and upsert one row per book into the MySQL ``books`` table.
    """

    def __init__(self):
        # Listing endpoint; expects a JSON body carrying a GraphQL-style query.
        self.url = "https://read.douban.com/j/kind/"
        # Browser-like headers captured from a real session so the site serves
        # the JSON API.  NOTE(review): the Cookie/session values will expire.
        self.headers = {
            'Connection': 'keep-alive',
            'Accept': 'application/json',
            'X-CSRF-Token': 'null',
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36',
            'Content-Type': 'application/json',
            'Origin': 'https://read.douban.com',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Dest': 'empty',
            'Referer': 'https://read.douban.com/category?page=68&kind=108',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7',
            'Cookie': 'bid=JxWgBTsnVA0; viewed="34815690"; _vwo_uuid_v2=D30B84CEFD341D446EC7C47084971A0CE|46ad1ae0928a0c34b1e0d0bc7d6923a2; douban-fav-remind=1; __utma=30149280.2117608891.1604282573.1604282573.1605506709.2; __utmz=30149280.1605506709.2.2.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _ga=GA1.3.2117608891.1604282573; _gid=GA1.3.1645800383.1606708813; _pk_ref.100001.a7dd=%5B%22%22%2C%22%22%2C1606791116%2C%22https%3A%2F%2Fwx.qq.com%2F%22%5D; _pk_id.100001.a7dd=09bf7b4333a3eed6.1606708813.3.1606791116.1606727317.; _pk_ses.100001.a7dd=*; _gat=1'
        }
        # NOTE(review): DB/Redis credentials are hard-coded — move to env/config.
        self.conn = pymysql.connect(host='127.0.0.1', port=3306, db="bookshelf", user='root', passwd='1234', charset='utf8')
        self.cur = self.conn.cursor()
        self.redis_conn = Redis(host='127.0.0.1', port=6379)

    def get_page_data(self, num):
        """POST the listing endpoint for page ``num``.

        :param num: 1-based page number.
        :return: decoded JSON payload (dict) on HTTP 200, otherwise ``False``.
        """
        post_data = {"sort": "hot", "page": num, "kind": 108,
                     "query": "\n    query getFilterWorksList($works_ids: [ID!]) {\n      worksList(worksIds: $works_ids) {\n        \n    \n    title\n    cover\n    url\n    isBundle\n    coverLabel\n  \n    \n    url\n    title\n  \n    \n    author {\n      name\n      url\n    }\n    origAuthor {\n      name\n      url\n    }\n    translator {\n      name\n      url\n    }\n  \n    \n  abstract\n  authorHighlight\n  editorHighlight\n\n    \n    isOrigin\n    kinds {\n      \n    name @skip(if: true)\n    shortName @include(if: true)\n    id\n  \n    }\n    ... on WorksBase @include(if: true) {\n      wordCount\n      wordCountUnit\n    }\n    ... on WorksBase @include(if: false) {\n      inLibraryCount\n    }\n    ... on WorksBase @include(if: false) {\n      \n    isEssay\n    \n    ... on EssayWorks {\n      favorCount\n    }\n  \n    \n    \n    averageRating\n    ratingCount\n    url\n  \n  \n  \n    }\n    ... on WorksBase @include(if: false) {\n      isColumn\n      isEssay\n      onSaleTime\n      ... on ColumnWorks {\n        updateTime\n      }\n    }\n    ... on WorksBase @include(if: true) {\n      isColumn\n      ... on ColumnWorks {\n        isFinished\n      }\n    }\n    ... on EssayWorks {\n      essayActivityData {\n        \n    title\n    uri\n    tag {\n      name\n      color\n      background\n      icon2x\n      icon3x\n      iconSize {\n        height\n      }\n      iconPosition {\n        x y\n      }\n    }\n  \n      }\n    }\n    highlightTags {\n      name\n    }\n  \n    isInLibrary\n    ... on WorksBase @include(if: false) {\n      \n    fixedPrice\n    salesPrice\n    isRebate\n  \n    }\n    ... on EbookWorks {\n      \n    fixedPrice\n    salesPrice\n    isRebate\n  \n    }\n    ... on WorksBase @include(if: true) {\n      ... on EbookWorks {\n        id\n        isPurchased\n        isInWishlist\n      }\n    }\n  \n        id\n        isOrigin\n      }\n    }\n  ",
                     "variables": {}}
        res = requests.post(self.url, json=post_data, headers=self.headers)
        print(res.status_code, type(res.status_code))
        if res.status_code != 200:
            return False
        # requests decodes the body for us; equivalent to json.loads(res.content.decode())
        return res.json()

    def get_detail_data(self, url):
        """GET a book detail page and return its HTML as text."""
        res = requests.get(url, headers=self.headers)
        return res.content.decode()

    def parse_detail_data(self, detail_html):
        """Extract publisher, publication date, score and ISBN from a detail page.

        :param detail_html: raw HTML of a read.douban.com book detail page.
        :return: dict with scalar values only — xpath list results are collapsed
                 to their first element, or ``''`` when the field is missing.
        """
        html = etree.HTML(detail_html)
        item = dict()
        # Publication date / publisher default to '' when the meta row is absent.
        item['pub_date'] = ''
        item['source'] = ''
        # The meta <p> rows vary per book; the publisher row is recognised by
        # its first <span> containing "出版社" (publisher).
        for p in html.xpath('//*[@class="article-meta"]/p'):
            if "出版社" not in p.xpath('.//span[1]/text()'):
                continue
            item['pub_date'] = p.xpath(".//span[2]/span[2]/text()")
            item['source'] = p.xpath(".//span[2]/span[1]/text()")
        # Rating
        item['score'] = html.xpath('//span[@class="score"]/text()')
        # Time the row is written
        item['save_time'] = datetime.datetime.now()
        item['ISBN'] = html.xpath('//*[@itemprop="isbn"]/text()')
        item['account_id'] = 1
        # Collapse xpath results (lists) to scalars; ints and datetimes pass
        # through untouched (BUGFIX: the old code coerced a falsy int to '').
        # The old quote→backtick mangling is gone: save_data() now uses
        # parameterized SQL, so quotes in values are safe to store verbatim.
        for key, value in item.items():
            if isinstance(value, list):
                item[key] = value[0] if value else ''
        return item

    def save_data(self, item):
        """Upsert one book row into MySQL.

        Uses a parameterized REPLACE (BUGFIX: the previous %-formatted SQL was
        injectable and broke on quotes in scraped text).
        """
        sql = ("replace into books(title,cover,author,source,pub_date,score,"
               "introduce,save_time,ISBN,account_id)"
               "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
        self.cur.execute(sql, (
            item['title'], item['cover'], item['author'], item['source'],
            item['pub_date'], item['score'], item['introduce'],
            item['save_time'], item['ISBN'], item['account_id']))
        self.conn.commit()

    def run(self):
        """Crawl listing pages until a non-200 response, saving each new book."""
        num = 1
        try:
            while True:
                json_data = self.get_page_data(num)
                if not json_data:
                    break
                for data in json_data['list']:
                    book_url = 'https://read.douban.com' + data['url']
                    # sadd returns 0 when the URL was already crawled — skip it.
                    if not self.redis_conn.sadd('db_urls', book_url):
                        continue
                    item = dict()
                    # Title and cover image come straight from the listing JSON.
                    item['title'] = data['title']
                    item['cover'] = data['cover']
                    # Prefer the listed author, fall back to the original
                    # author, else '' (BUGFIX: the old code raised IndexError
                    # when both lists were empty).
                    if data['author']:
                        item['author'] = data['author'][0]['name']
                    elif data.get('origAuthor'):
                        item['author'] = data['origAuthor'][0]['name']
                    else:
                        item['author'] = ''
                    # Abstract from the listing, the rest from the detail page.
                    item['introduce'] = data['abstract']
                    detail_html = self.get_detail_data(book_url)
                    item = {**item, **self.parse_detail_data(detail_html)}
                    print(item)
                    try:
                        self.save_data(item)
                    except Exception as e:
                        # On DB failure, drop the URL from the dedup set so a
                        # later crawl retries this book.
                        print(e)
                        self.redis_conn.srem("db_urls", book_url)
                num += 1
        finally:
            # Always release DB resources, even if the crawl loop raises
            # (BUGFIX: the cursor was never closed; conn.close() was skipped
            # on any exception).
            self.cur.close()
            self.conn.close()


# Script entry point: build the crawler and start the full crawl.
if __name__ == '__main__':
    crawler = DouBanBook()
    crawler.run()

# TODO: cap the crawl at 300 page fetches instead of looping until a non-200 response
