import random
import re

import requests
from lxml import etree


def crawler_book(book_id, book_name):
    """Crawl the chapter list of a novel on diquge.com and fetch its
    first chapter as a smoke test.

    Args:
        book_id: Numeric site identifier of the book (part of the URL).
        book_name: Title of the book, used to strip the title prefix
            from the chapter heading.

    Side effects:
        Prints progress, the normalized chapter heading, and (when a
        chapter body is paginated) the total page count.
    """
    print(f'开始爬取{book_id}的小说')

    base_url = 'http://www.diquge.com'
    url = f'{base_url}/book/{book_id}/'
    # Rotate the User-Agent so requests look less uniform.
    ua_list = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Safari/605.1.15"
    ]

    headers = {
        'User-Agent': random.choice(ua_list),
        'Cookie': 'Hm_lvt_5a95b9b036f3115877c6b70742e12656=1741168886; HMACCOUNT=3833EDEE3D71D336; kuxin_history=384999%2C1; Hm_lpvt_5a95b9b036f3115877c6b70742e12656=1741169031'
    }
    # timeout prevents the crawler from hanging forever on a dead server.
    response = requests.get(url, headers=headers, timeout=10)
    html = etree.HTML(response.text)
    url_href = html.xpath('//*[@class="dirlist three clearfix"]/li[*]/a/@href')
    print(f'共{len(url_href)}章')

    if not url_href:
        # Chapter list empty: request blocked or page layout changed.
        return

    # Test run: fetch only the first chapter.
    url = base_url + url_href[0]
    response = requests.get(url, headers=headers, timeout=10)
    html = etree.HTML(response.text)
    title = html.xpath('//*[@class="title"]/h1/a/text()')
    content = html.xpath('//*[@id="chaptercontent"]/p/text()')

    # Normalize the heading "<book_name> 第N章<title>" -> "第N章 <title>".
    # Guarded: the title xpath may be empty, the book name may not appear
    # in the heading, and the heading may lack the '章' marker.
    if title:
        parts = title[0].split(book_name)
        if len(parts) > 1:
            res = parts[1].strip().split('章')
            if len(res) > 1:
                print(res[0] + '章 ' + res[1].strip())

    for i, paragraph in enumerate(content):
        res = paragraph.strip().split("本章未完")
        if len(res) == 2:
            content[i] = '  ' + res[0] + '\n'
            # Bug fix: re.search may return None — calling .group() on it
            # raised AttributeError when the page-count marker was absent.
            match = re.search(r'共(\d+)页', res[1])
            if match:
                print(match.group(1))


def crawler_book_info(book_id):
    """Crawl a novel's metadata page on diquge.com and save its cover.

    Args:
        book_id: Numeric site identifier of the book (part of the URL).

    Side effects:
        Writes the cover image to ``{book_id}.jpg`` in the working
        directory and prints author, genre, latest chapter, synopsis,
        title and book id.
    """
    print(f'开始爬取{book_id}的小说信息')

    url = f'http://www.diquge.com/book/{book_id}/'
    # Rotate the User-Agent so requests look less uniform.
    ua_list = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Safari/605.1.15"
    ]

    headers = {
        'User-Agent': random.choice(ua_list),
        'Cookie': 'Hm_lvt_5a95b9b036f3115877c6b70742e12656=1741168886; HMACCOUNT=3833EDEE3D71D336; kuxin_history=384999%2C1; Hm_lpvt_5a95b9b036f3115877c6b70742e12656=1741169031'
    }
    # timeout prevents the crawler from hanging forever on a dead server.
    response = requests.get(url, headers=headers, timeout=10)
    html = etree.HTML(response.text)

    info = html.xpath('//*[@class="novelinfo-l"]/ul/li[*]/a/text()')
    if len(info) < 3:
        # Info list incomplete: request blocked or page layout changed.
        print(f'{book_id}的信息页解析失败')
        return
    author = info[0]
    book_type = info[1]
    new_chapter = info[2].split(' ')[0]
    book_introduction = html.xpath('//*[@class="body novelintro"]/text()')[1].strip()
    book_name = html.xpath('//*[@class="header line"]/h1/text()')[0]
    novel_number = book_id

    # NOTE(review): cover URL is hard-coded — it should be scraped from
    # the info page so it matches the requested book_id.
    img_url = 'http://www.diquge.com/cover/96/b4/02/96b40292f8d66634286925efa56f3917.jpg'
    # Bug fix: the original re-fetched `url` (the HTML page) here, so the
    # saved .jpg contained HTML. Fetch the image URL instead.
    response = requests.get(img_url, headers=headers, timeout=10)
    with open(f'{book_id}.jpg', 'wb') as f:
        f.write(response.content)

    print(f'作者：{author}')
    print(f'类型：{book_type}')
    print(f'最新章节：{new_chapter}')
    print(f'简介：{book_introduction}')
    print(f'书名：{book_name}')
    print(f'小说编号：{novel_number}')


if __name__ == '__main__':
    # Script entry point: crawl the chapters of book 9282 ("斗破苍穹").
    # To fetch metadata instead, call crawler_book_info(384999).
    crawler_book(9282, '斗破苍穹')
