from threading import Thread, Lock
import redis
import requests
from lxml import etree
import json
import re
import MySQLdb
import os

"""
Note: keep MySQL batch inserts to about 1000 rows per executemany; much
larger batches may fail to insert.

The code works as-is, but exception handling is sparse and could be
hardened further.
"""


# MySQL connection configuration
def get_mysql_connection():
    """Open and return a fresh connection to the local `bilibili` database."""
    connect_kwargs = {
        'host': 'localhost',
        'user': 'root',          # replace with your username
        'password': '',          # replace with your password
        'database': 'bilibili',  # replace with your database name
        'port': 3306,
        'autocommit': True,      # commit each statement automatically
    }
    return MySQLdb.connect(**connect_kwargs)


# Guards filesystem operations so consumer threads don't interleave writes
file_lock = Lock()


def write_buffered_chapters(chapter_buffer):
    """Write the buffered chapters to disk, one .txt file per chapter.

    Each item in `chapter_buffer` is a dict with keys 'story_name',
    'chapter_name' and 'content'. Files are grouped into one folder per
    story under the hard-coded output root. The module-level `file_lock`
    is held for the whole batch so concurrent consumer threads do not
    interleave filesystem operations.
    """
    with file_lock:
        for chapter in chapter_buffer:
            story_name = chapter['story_name']
            chapter_name = chapter['chapter_name']
            content = chapter['content']

            folder_name = story_name.replace(" ", "_").replace("/", "_")
            folder_path = os.path.join(r'E:\novels', folder_name)
            os.makedirs(folder_path, exist_ok=True)

            # Bug fix: the chapter name must be sanitized too — a '/' in it
            # would escape the story folder / produce an invalid path.
            safe_chapter_name = chapter_name.replace("/", "_")
            file_path = os.path.join(folder_path, f"{safe_chapter_name}.txt")
            try:
                with open(file_path, 'w', encoding='utf-8') as f:
                    f.write(content)
                print(f"写入章节文件: {file_path}")
            except Exception as e:
                # Best-effort: log and keep writing the remaining chapters.
                print(f"文件写入失败: {e}")


# Resumable crawling: tasks live in Redis, so a restart picks up where it left off
def parse_chapter():
    """Consume chapter tasks from the Redis queue until it is empty.

    For each task, downloads the chapter page, extracts the text, and:
      * batches rows for bulk insertion into MySQL (tb_chapter), 100 at a time;
      * buffers chapter text for writing to disk, 100 chapters at a time.
    Partial batches are flushed before the DB connection is closed.
    Reads the module-level `redis_conn` created under __main__.
    """
    db_conn = get_mysql_connection()
    cursor = db_conn.cursor()

    # Rows pending bulk INSERT: (chapter_id, chapter_name, content, book_id)
    chapter_data = []
    # Chapters pending write to disk
    chapter_buffer = []

    while True:
        # Atomically pop one task. Checking llen() first and popping after
        # would race against the other consumer threads (lpop could then
        # return None and json.loads(None) would crash).
        task = redis_conn.lpop('zongheng_task_queue')
        if task is None:
            break
        task = json.loads(task)
        story_name = task['story_name']
        chapter_name = task['chapter_name']
        chapter_url = task['chapter_url']

        try:
            response = requests.get(chapter_url)
            response.raise_for_status()
        except requests.RequestException as e:
            # Skip this chapter rather than kill the whole consumer thread.
            print(f"请求失败: {chapter_url} -> {e}")
            continue

        root = etree.HTML(response.content.decode())
        content = "\n".join(root.xpath("//div[@class='content']/p/text()"))
        print(chapter_url)

        match = re.search(r'/chapter/(\d+)/(\d+)\.html', chapter_url)
        if match is None:
            # URL doesn't follow the expected pattern; skip instead of crashing.
            print(f"无法解析URL: {chapter_url}")
            continue
        book_id, chapter_id = match.group(1), match.group(2)

        # Queue the row for the next bulk insert
        chapter_data.append((chapter_id, chapter_name, content, book_id))

        print(f"id:{chapter_id}《{story_name}》---《{chapter_name}》--- {content} --- 外键:{book_id}")
        # Queue the chapter for the next file-buffer flush
        chapter_buffer.append({
            'story_name': story_name,
            'chapter_name': chapter_name,
            'content': content
        })

        # Every 100 chapters, flush the file buffer
        if len(chapter_buffer) >= 100:
            write_buffered_chapters(chapter_buffer)
            chapter_buffer.clear()

        # Every 100 rows, commit one batch to the database
        # (the original comment said 10000, but the code always used 100)
        if len(chapter_data) >= 100:
            _flush_chapter_data(cursor, db_conn, chapter_data)

    # Write any remaining chapters (fewer than 100)
    if chapter_buffer:
        write_buffered_chapters(chapter_buffer)

    # Commit any remaining rows (if any)
    if chapter_data:
        _flush_chapter_data(cursor, db_conn, chapter_data)

    # Close the database connection
    cursor.close()
    db_conn.close()


def _flush_chapter_data(cursor, db_conn, chapter_data):
    """Bulk-insert the pending rows into tb_chapter; clear the list on success.

    On a MySQL error the transaction is rolled back and the rows are kept in
    `chapter_data` (mutated in place by the caller's reference).
    """
    insert_query = """
               INSERT INTO tb_chapter (id, chapter_name, content, book_id)
               VALUES (%s, %s, %s, %s)
           """
    try:
        cursor.executemany(insert_query, chapter_data)  # bulk insert
        db_conn.commit()
        print(f"成功提交 {len(chapter_data)} 条数据到数据库")
        chapter_data.clear()  # ready for the next batch
    except MySQLdb.MySQLError as e:
        print(f"插入失败: {e}")
        db_conn.rollback()  # roll back the failed transaction
        print(f"SQL 错误详情: {e.args}")


if __name__ == '__main__':
    # Shared Redis connection read by every consumer thread
    redis_conn = redis.Redis()
    # Spawn 8 consumer threads and wait for all of them to drain the queue
    thread_list = [Thread(target=parse_chapter) for _ in range(8)]
    for worker in thread_list:
        worker.start()
    for worker in thread_list:
        worker.join()