# -*- coding:utf-8 -*-
import scrapy
import requests
from lxml import etree
import re
import pymysql


headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36'
        , 'Cookie': 'bid=gkQLV26H5iY; __utmc=30149280; viewed="5337254_1068920_1007305_35494160_1813841_3261600_27002046_5337248_5414391_25985021"; ap_v=0,6.0; __utma=30149280.1216680356.1717766516.1717942560.1717988194.10; __utmz=30149280.1717988194.10.3.utmcsr=search.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/book/subject_search; frodotk_db="6416e87556a0a4d62c44b96003e1ee82"; push_noty_num=0; push_doumail_num=0; ct=y; __utmt_douban=1; dbcl2="200146926:yc0KSbhVj2k"; ck=fLap; _pk_ref.100001.8cb4=%5B%22%22%2C%22%22%2C1717991968%2C%22https%3A%2F%2Faccounts.douban.com%2F%22%5D; _pk_id.100001.8cb4=d5d653af7029be2b.1717991968.; _pk_ses.100001.8cb4=1; __utmt=1; __utmv=30149280.20014; __utmb=30149280.19.10.1717988194; __yadk_uid=V3EYD2jV7GuhamFrlRnJiWgo1fqbNuw4'
}


class PreSpider:
    def __init__(self):
        self.headers = headers
        self.url = f'''https://book.douban.com/top250'''
        self.pre_mysql()

    def book_href(self):
        page_url, book_dict, book_list = [], {}, []
        page_num = int(250/25)
        for i in range(page_num):
            print(f'''开始爬第{i + 1}页''')
            url = self.url + f'''?start={i*25}'''
            page_url.append(url)
            response = requests.get(url=url, headers=self.headers).text
            # print(response)
            selector = etree.HTML(response)
            book_name = selector.xpath("//a/@title")
            book_href = selector.xpath("//td[@valign='top']/a/@href")
            for j in range(len(book_name)):
                book_dict[book_name[j]] = book_href[j]
                book_list.append(book_href[j])
        print(len(book_dict))
        print(book_dict)
        return page_url, book_dict, book_list

    def pre_mysql(self):
        db = pymysql.connect(host='localhost',
                             user='root',
                             port=3306,  # 端口
                             password='',
                             database='mysql')
        cursor = db.cursor()
        sql_drop = f'''
                    DROP TABLE IF EXISTS DOU_BAN_BOOK_TOP250;
                    '''
        cursor.execute(sql_drop)
        sql_creat_table = f'''
                    CREATE TABLE IF NOT EXISTS DOU_BAN_BOOK_TOP250(
                    id INT NOT NULL AUTO_INCREMENT COMMENT 'id',
                    book_name VARCHAR(100) NOT NULL,
                    author VARCHAR(100) NOT NULL,
                    press VARCHAR(100),
                    publication_year VARCHAR(100),
                    page_num VARCHAR(100),
                    price VARCHAR(100),
                    binding VARCHAR(100),
                    series VARCHAR(100),
                    ibsn VARCHAR(100),
                    score DOUBLE,
                    content VARCHAR(10000) character SET utf8,
                    PRIMARY KEY (id,book_name)
                    ) COMMENT='豆瓣书籍TOP250表' ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;'''
        cursor.execute(sql_creat_table)
        print(f'''数据库表 DOU_BAN_BOOK_TOP250 删除并创建成功 !!!''')


class BooktopSpider(scrapy.Spider):
    pre = PreSpider()
    name = "BookTop"
    allowed_domains = ["book.douban.com"]
    start_urls = pre.book_href()[2]

    def start_requests(self):
        # 除了内容简介，其余信息均在初始页面获取
        print(len(self.start_urls))
        print(self.start_urls)
        for i in self.start_urls:
            yield scrapy.Request(url=i, callback=self.parse, headers=headers)

    def parse(self, response, **kwargs):
        # print(response.text)
        data = {}
        book_name = response.xpath("//span[@property='v:itemreviewed']/text()").extract()[0]

        temp = response.xpath("//div[@id='info']//text()").extract()  # 获取所有信息
        other = []
        for mes in temp:
            if mes.strip('\n').strip() != '':
                other.append(re.sub(r'\s+', ' ', mes.strip('\n').strip().replace('\'','').replace(': ',' ').replace(',',' ')))
        t = ','.join(other)
        t1 = t.replace(',:,',':,').replace(':,',':').replace('作者,','作者').replace(',/,','和').replace('页数:定价','页数:0,定价')
        t2 = t1.replace(':', '\':\'').replace(',', '\',\'')
        t3 = '{\'' + t2 + '\'}'
        other_dict = eval(t3)
        try:
            other_dict['作者'] = other_dict['作者']
        except KeyError:
            other_dict['作者'] = None
        try:
            other_dict['出版社'] = other_dict['出版社']
        except KeyError:
            other_dict['出版社'] = None
        try:
            other_dict['丛书'] = other_dict['丛书']
        except KeyError:
            other_dict['丛书'] = None
        try:
            other_dict['出版年'] = other_dict['出版年']
        except KeyError:
            other_dict['出版年'] = None
        try:
            other_dict['页数'] = other_dict['页数']
        except KeyError:
            other_dict['页数'] = None
        try:
            other_dict['定价'] = other_dict['定价'][:-1]
        except KeyError:
            other_dict['定价'] = None
        try:
            other_dict['装帧'] = other_dict['装帧']
        except KeyError:
            other_dict['装帧'] = None
        try:
            other_dict['ISBN'] = other_dict['ISBN']
        except KeyError:
            other_dict['ISBN'] = None

        score = float(response.xpath("//strong/text()").extract()[0])

        validate = response.xpath("//div[@class='intro']//text()").extract()
        if '展开全部' in validate:
            content = response.xpath("//span[@class='all hidden']//text()").extract()
            content2 = []
            for cont in content:
                if cont.strip('\n').strip() != '' and 'text-indent' not in cont.strip('\n').strip():
                    content2.append(cont.strip('\n').strip())
            content = '。'.join(content2)
        else:
            content = response.xpath("//div[@class='indent' and @id='link-report']/div[@class='']//text()").extract()
            content2 = []
            for cont in content:
                if cont.strip('\n').strip() != '' and 'text-indent' not in cont.strip('\n').strip():
                    content2.append(cont.strip('\n').strip())
            content = ''.join(content2)
        content = content.replace('👾','')  # 处理特殊字符

        data.update({'book_name': book_name
                     , 'author': other_dict['作者']
                     , 'press': other_dict['出版社']
                     , 'series': other_dict['丛书']
                     , 'publication_year': other_dict['出版年']
                     , 'page_num': other_dict['页数']
                     , 'price': other_dict['定价']
                     , 'binding': other_dict['装帧']
                     , 'ibsn': other_dict['ISBN']
                     , 'score': score
                     , 'content': content})
        yield data



