"""
该模块用于从豆瓣电影页面中提取数据，并将数据安全存储到 MySQL 数据库中。
"""

import logging
import os
import re

from bs4 import BeautifulSoup
from peewee import MySQLDatabase, Model, CharField, FloatField, IntegerField

# Logging configuration: INFO level, one module-scoped logger for the whole script.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Database connection configuration.
# NOTE(security): credentials should not live in source control. They are now
# read from the environment when present; the literal fallbacks preserve the
# original defaults so existing setups keep working, but should be removed.
db = MySQLDatabase(
    "douban_movie",
    host=os.environ.get("DOUBAN_DB_HOST", "localhost"),
    port=int(os.environ.get("DOUBAN_DB_PORT", "3306")),
    user=os.environ.get("DOUBAN_DB_USER", "root"),
    password=os.environ.get("DOUBAN_DB_PASSWORD", "3218560376jJ")
)

class Movie(Model):
    """
    Peewee model for one Douban movie record (table ``douban_movie``).

    Fields:
        title       movie title (CharField)
        rating_num  average rating (FloatField)
        comment_num number of user ratings (IntegerField)
        directors   director name(s) (CharField)
        actors      lead actor name(s) (CharField)
        year        release year (IntegerField)
        country     country/region of origin (CharField)
        category    genre(s) (CharField)
        pic         poster image URL (CharField)
    """
    title = CharField(max_length=200)
    rating_num = FloatField()
    comment_num = IntegerField()
    directors = CharField(max_length=200)
    actors = CharField(max_length=200)
    year = IntegerField()  # stored as an integer year, e.g. 1994
    country = CharField(max_length=100)
    category = CharField(max_length=100)
    pic = CharField(max_length=200)

    class Meta:
        database = db
        table_name = 'douban_movie'

def initialize_database():
    """Create the Movie table if it does not already exist.

    Errors are logged rather than raised so the caller decides how to
    proceed; the connection is always closed afterwards.
    """
    try:
        # reuse_if_open=True: plain connect() raises OperationalError when a
        # connection is already open, which would wrongly abort initialization.
        db.connect(reuse_if_open=True)
        # safe=True emits CREATE TABLE IF NOT EXISTS, so re-runs are harmless.
        db.create_tables([Movie], safe=True)
        logger.info("数据库表已初始化")
    except Exception as e:
        logger.error(f"数据库初始化失败: {e}")
    finally:
        db.close()

def parse_movie_data(html_content: str):
    """Parse a Douban top-250 style HTML page into a list of movie dicts.

    Args:
        html_content: raw HTML of the list page.

    Returns:
        list[dict]: one dict per successfully parsed movie, keys matching the
        Movie model fields. Items that fail to parse are logged and skipped.
    """
    movies = []
    soup = BeautifulSoup(html_content, 'lxml')
    movie_items = soup.select('ol.grid_view li')

    for item in movie_items:
        try:
            # Title and rating.
            title = item.select_one('span.title').get_text(strip=True)
            rating_num = float(item.select_one('span.rating_num').get_text(strip=True))

            # Ratings count: locate the span whose text ends with '人评价'
            # explicitly. The old `select('span')[-1]` grabbed the LAST span in
            # the item, which is the quote span (span.inq) when present, making
            # int() raise and the whole movie get skipped. next() raises
            # StopIteration when no such span exists, preserving skip-on-failure.
            comment_text = next(
                s.get_text(strip=True)
                for s in item.select('span')
                if s.get_text(strip=True).endswith('人评价')
            )
            comment_num = int(comment_text.replace('人评价', ''))

            # Directors / actors from the first info paragraph.
            info_line = item.select_one('div.bd p').get_text(strip=True, separator='|').split('|')
            directors = info_line[0].replace('导演:', '').strip()
            actors = info_line[1].replace('主演:', '').strip() if len(info_line) > 1 else ''

            # Year / country / category from the second info paragraph.
            details = item.select('div.bd p')[1].get_text(strip=True, separator='/').split('/')
            # Some entries carry suffixes (e.g. "1961(中国大陆)") or non-breaking
            # spaces, so int() on the raw text crashes. Extract the first
            # 4-digit run instead; no match raises AttributeError into the
            # per-item handler, keeping the original skip behavior.
            year = int(re.search(r'\d{4}', details[0]).group())
            country = details[1].strip()
            category = details[2].strip()

            # Poster image URL.
            pic = item.select_one('img')['src']

            movies.append({
                'title': title,
                'rating_num': rating_num,
                'comment_num': comment_num,
                'directors': directors,
                'actors': actors,
                'year': year,
                'country': country,
                'category': category,
                'pic': pic
            })
        except Exception as e:
            logger.error(f"解析电影数据失败: {e}")
    return movies

def save_to_database(movies_data: list):
    """Insert movie dicts into the database, skipping duplicates.

    A movie is a duplicate when a row with the same title AND year already
    exists. All inserts run inside one transaction; errors are logged, and
    the connection is always closed.

    Args:
        movies_data: list of dicts whose keys match the Movie model fields.
    """
    try:
        # reuse_if_open=True: plain connect() raises OperationalError when a
        # connection is already open (e.g. when called right after another
        # db operation), which would wrongly abort the whole save.
        db.connect(reuse_if_open=True)
        with db.atomic():  # single transaction for all inserts
            for data in movies_data:
                duplicate = Movie.select().where(
                    (Movie.title == data['title']) &
                    (Movie.year == data['year'])
                ).exists()
                if duplicate:
                    logger.warning(f"电影已存在: {data['title']} ({data['year']})")
                else:
                    Movie.create(**data)
                    logger.info(f"新增电影: {data['title']} ({data['year']})")
    except Exception as e:
        logger.error(f"数据库操作失败: {e}")
    finally:
        db.close()

if __name__ == "__main__":
    # 1. Ensure the database table exists.
    initialize_database()

    # 2. Read the locally saved HTML page.
    try:
        with open('douban.html', 'r', encoding='utf-8') as f:
            html = f.read()
    except FileNotFoundError:
        logger.error("错误：未找到 douban.html 文件")
        # exit() is injected by the `site` module and not guaranteed to exist
        # in every runtime context; raising SystemExit is always available.
        raise SystemExit(1)

    # 3. Parse the page and persist the results.
    movies = parse_movie_data(html)
    if movies:
        save_to_database(movies)
        logger.info("数据存储完成")
    else:
        logger.warning("未解析到有效电影数据")