import logging
import re
from typing import Dict, List, Optional

import pymysql
from bs4 import BeautifulSoup
from peewee import *

# Module-level logging setup: INFO and above, default format/handler.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Connection settings expanded into peewee's MySQLDatabase (see DatabaseManager).
# NOTE(review): the password is hard-coded in source — consider loading it from
# an environment variable or config file before sharing/deploying this script.
DB_CONFIG = {
    "database": "douban_movies",
    "host": "localhost",
    "port": 3306,
    "user": "root",
    "password": "lyj18990572378!",
    "charset": "utf8mb4"
}


class DatabaseManager:
    """Owns the peewee MySQL handle: open, close, and table creation."""

    def __init__(self):
        # Build the database handle from the module-level settings.
        self.db = MySQLDatabase(**DB_CONFIG)

    def connect(self):
        """Open the database connection; log and re-raise on failure."""
        try:
            self.db.connect()
        except Exception as e:
            logger.error(f"数据库连接失败: {e}")
            raise
        logger.info("成功连接到数据库")

    def disconnect(self):
        """Close the connection if it is currently open; no-op otherwise."""
        if self.db.is_closed():
            return
        self.db.close()
        logger.info("数据库连接已关闭")

    def create_tables(self):
        """Create the Movie table if it does not exist (safe=True)."""
        try:
            self.db.create_tables([Movie], safe=True)
        except Exception as e:
            logger.error(f"创建表失败: {e}")
            raise
        logger.info("数据表已就绪")


class Movie(Model):
    """peewee model: one row per movie, stored in the `movies` table.

    Field values are produced by MovieParser.parse_movie_item; the nullable
    columns correspond to details that may be absent from the parsed HTML.
    """
    title = CharField(max_length=200)
    rating = FloatField()  # numeric score parsed from the page
    rating_count = IntegerField()  # number of ratings ("N人评价")
    directors = CharField(max_length=200, null=True)
    actors = CharField(max_length=500, null=True)
    release_year = CharField(max_length=10, null=True)
    country = CharField(max_length=100, null=True)
    genres = CharField(max_length=200, null=True)
    cover_url = CharField(max_length=200, null=True)
    duration = CharField(max_length=50, null=True)

    class Meta:
        database = None  # bound at runtime by main() via Movie._meta.database
        table_name = 'movies'


class MovieParser:
    """Parse Douban movie-list HTML and extract per-movie records."""

    @staticmethod
    def parse_movie_item(item) -> Optional[Dict]:
        """Parse a single movie <li> element.

        Args:
            item: BeautifulSoup Tag for one list entry.

        Returns:
            A dict whose keys match the Movie model fields, or None when a
            required element is missing or malformed (logged, not raised).
        """
        try:
            title = item.select_one('.title').get_text(strip=True)

            # Rating and vote count.
            rating = float(item.select_one('.rating_num').get_text(strip=True))
            rating_count_text = item.select_one('.star span:last-child').get_text()
            # BUG FIX: str.strip('人评价') treats its argument as a character
            # *set*, which is fragile; extract the integer explicitly instead.
            count_match = re.search(r'\d+', rating_count_text)
            if count_match is None:
                raise ValueError(f"无法解析评价人数: {rating_count_text!r}")
            rating_count = int(count_match.group())

            # Director / cast line (first <p> in .bd).
            info_text = item.select_one('.bd p').get_text(strip=True, separator='|')
            info_parts = [part.strip() for part in info_text.split('|') if part.strip()]

            directors, actors = "", ""
            for part in info_parts:
                if part.startswith('导演:'):
                    directors = part.replace('导演:', '').strip()
                elif part.startswith('主演:'):
                    actors = part.replace('主演:', '').strip()

            # Year / country / genres / duration.
            # NOTE(review): assumes the second <p> carries these fields in this
            # order — confirm against the actual page markup.
            # BUG FIX: select_one may return None; guard instead of letting an
            # AttributeError discard the whole entry.
            details_el = item.select_one('.bd p:nth-of-type(2)')
            details = []
            if details_el is not None:
                raw = details_el.get_text(strip=True, separator='/')
                details = [d.strip() for d in raw.split('/')]

            year = details[0] if len(details) > 0 else None
            country = details[1] if len(details) > 1 else None
            genres = details[2] if len(details) > 2 else None
            duration = details[3] if len(details) > 3 else None

            # Cover image; tolerate a missing <img> or absent src attribute.
            cover_el = item.select_one('.pic img')
            cover_url = cover_el.get('src') if cover_el is not None else None

            return {
                'title': title,
                'rating': rating,
                'rating_count': rating_count,
                'directors': directors,
                'actors': actors,
                'release_year': year,
                'country': country,
                'genres': genres,
                'duration': duration,
                'cover_url': cover_url
            }

        except Exception as e:
            logger.error(f"解析电影条目失败: {e}")
            return None

    @staticmethod
    def parse_html_file(file_path: str) -> List[Dict]:
        """Read an HTML file and return a list of parsed movie dicts.

        Returns an empty list (after logging the error) when the file cannot
        be read or parsed; entries that fail to parse are skipped.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                html = f.read()

            soup = BeautifulSoup(html, 'lxml')
            movie_items = soup.select('.grid_view li')

            movies = []
            for item in movie_items:
                movie_data = MovieParser.parse_movie_item(item)
                if movie_data:
                    movies.append(movie_data)

            return movies

        except Exception as e:
            logger.error(f"解析HTML文件失败: {e}")
            return []


class DataProcessor:
    """Persist parsed movie data to the database."""

    @staticmethod
    def save_movies(movies: List[Dict]):
        """Save movie dicts as Movie rows inside a single transaction.

        Args:
            movies: dicts whose keys match the Movie model fields.

        Raises:
            Whatever the database layer raises on failure (after logging);
            the transaction is rolled back as a unit.
        """
        try:
            # BUG FIX: the original referenced an undefined global `db`
            # (NameError at runtime). Use the database bound to the Movie
            # model, which main() sets via Movie._meta.database.
            database = Movie._meta.database
            with database.atomic():
                for movie in movies:
                    Movie.create(**movie)
            logger.info(f"成功保存{len(movies)}条电影数据")
        except Exception as e:
            logger.error(f"保存数据失败: {e}")
            raise


def main():
    """Entry point: connect to MySQL, parse the HTML file, persist results.

    All errors are logged rather than propagated; the database connection is
    always closed on exit.
    """
    # BUG FIX: define db_manager before the try block — if DatabaseManager()
    # or connect() raised, the finally clause hit NameError and masked the
    # original error.
    db_manager = None
    try:
        db_manager = DatabaseManager()
        db_manager.connect()
        Movie._meta.database = db_manager.db  # bind the model at runtime
        db_manager.create_tables()

        # Parse the saved HTML page (static methods — no instance needed).
        movies = MovieParser.parse_html_file('douban.html')

        if movies:
            DataProcessor.save_movies(movies)
        else:
            logger.warning("未解析到任何电影数据")

    except Exception as e:
        logger.error(f"程序运行出错: {e}")
    finally:
        if db_manager is not None:
            db_manager.disconnect()
        logger.info("程序执行完毕")


# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()