import pymysql
import requests
from lxml import etree
from multiprocessing import Pool


def scrape_page(page):
    """Scrape one listing page of finished novels from bige3.cc and store every chapter in MySQL.

    For the given listing ``page`` number, fetches the JSON index of books, then for each
    book walks its chapter list and downloads each chapter body, inserting one row per
    chapter into the ``story_bxg`` table.

    Args:
        page: 1-based listing page number passed as the ``page`` query parameter.

    Side effects:
        Opens (and always closes) a pymysql connection; performs HTTP GETs; prints
        progress to stdout; commits one INSERT per chapter (rolls back on failure).
    """
    headers = {
        "sec-ch-ua": "\"Google Chrome\";v=\"119\", \"Chromium\";v=\"119\", \"Not?A_Brand\";v=\"24\"",
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Referer": "https://www.bige3.cc/finish/",
        "X-Requested-With": "XMLHttpRequest",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }
    # NOTE(review): credentials are hard-coded; consider moving them to env vars/config.
    db = pymysql.connect(user="root", password="544002", host="localhost", database="shiyu", port=3307, charset="utf8")
    try:
        url = "https://www.bige3.cc/json"
        params = {
            "sortid": "0",
            "page": page
        }

        # timeout= prevents a stalled server from hanging this worker process forever.
        response = requests.get(url, headers=headers, params=params, timeout=30).json()

        # SQL is loop-invariant; build it once instead of per chapter.
        sql = "insert into story_bxg(articlename,url_img,intro,author,chapter_name,chapter_url_,author_content) values(%s," \
              "%s,%s,%s,%s,%s,%s)"

        for respon in response:
            url_list = respon["url_list"]
            url_img = respon["url_img"]
            articlename = respon["articlename"]
            author = respon["author"]
            intro = respon["intro"]
            chapter_url = "https://www.bige3.cc/" + url_list
            chapter_response = requests.get(chapter_url, headers=headers, timeout=30)
            chapter_ = etree.HTML(chapter_response.content.decode())
            result = chapter_.xpath("//div[@class='listmain']/dl//dd/a")
            for chapter in result:
                chapter_name = chapter.xpath("./text()")[0]
                chapter_url_ = "https://www.bige3.cc/" + chapter.xpath("./@href")[0]
                # Skip the "expand all chapters" placeholder link in the chapter list.
                if "展开全部章节" not in chapter_name:
                    article_ = requests.get(chapter_url_, headers=headers, timeout=30).content.decode()
                    # [1:-2] drops the title line and the trailing navigation lines.
                    paragraphs = etree.HTML(article_).xpath("//div[@id='chaptercontent']/text()")[1:-2]
                    author_content = "".join(paragraphs)
                    print(articlename, "---", url_img, "---", intro, "---", author, "---", chapter_name, "---",
                          chapter_url_, "---", author_content)

                    # 数据库操作 (pymysql cursors support the context-manager protocol)
                    par = (articlename, url_img, intro, author, str(chapter_name), chapter_url_, author_content)
                    with db.cursor() as cursor:
                        try:
                            cursor.execute(sql, par)
                            db.commit()
                            print("写入数据库成功")
                        except Exception as e:
                            db.rollback()
                            print(e)
    finally:
        # Always release the connection, even when a request/parse step raises.
        db.close()


if __name__ == "__main__":
    # 设置进程池大小
    pool_size = 34
    # 要爬取的页面数量
    num_pages = 100
    # The context manager guarantees the pool is terminated/joined even if
    # pool.map raises (the bare close()/join() pair did not).
    with Pool(pool_size) as pool:
        # 使用进程池并行爬取多个页面
        pool.map(scrape_page, range(1, num_pages + 1))
