import os
import re
import time
from datetime import datetime
from typing import Dict, List, Optional

import requests
import xlwt
from bs4 import BeautifulSoup


class Movie:
    """Plain data holder for one scraped movie entry.

    Instances start out mostly empty; the scraper fills the fields in as it
    parses each list entry and downloads the poster image.
    """

    def __init__(self, rank: int = 0):
        # Position of this movie within the scraped list.
        self.rank = rank
        # Parsed from the list page.
        self.title: str = ""
        self.url: str = ""
        self.img_url: str = ""
        self.rating: float = 0.0
        self.rating_count: str = ""
        self.director: str = ""
        self.actors: str = ""
        self.genre: str = ""
        self.country: str = ""
        self.year: str = ""
        # Set by the image-download step.
        self.img_local_path: str = ""
        self.download_success: bool = False

    def to_dict(self) -> Dict:
        """Return the movie as a dict keyed by the Chinese report labels."""
        status = '成功' if self.download_success else '失败'
        pairs = (
            ('排名', self.rank),
            ('电影标题', self.title),
            ('电影链接', self.url),
            ('图片URL', self.img_url),
            ('评分', self.rating),
            ('评价人数', self.rating_count),
            ('导演', self.director),
            ('主演', self.actors),
            ('类型', self.genre),
            ('制片国家/地区', self.country),
            ('年份', self.year),
            ('本地图片路径', self.img_local_path),
            ('图片下载状态', status),
        )
        return dict(pairs)

    def __str__(self):
        """Render a multi-line, emoji-decorated summary of the movie."""
        icon = '✅' if self.download_success else '❌'
        poster = os.path.basename(self.img_local_path) if self.img_local_path else '无图片'
        return f"""
🎬 电影 #{self.rank}
📽️ 片名: {self.title}
⭐ 评分: {self.rating}
👥 评价: {self.rating_count}
🎭 导演: {self.director}
🎪 主演: {self.actors}
🎨 类型: {self.genre}
🌍 国家: {self.country}
📅 年份: {self.year}
🖼️ 图片: {icon} {poster}
🔗 链接: {self.url}
        """


class DoubanSpider:
    """Scraper collecting movie entries from a Douban "doulist" page.

    NOTE(review): ``base_url`` points at doulist 3936288 while the printed
    messages call the data "Top250" — confirm which list is actually intended.
    """

    def __init__(self):
        self.base_url = 'https://www.douban.com/doulist/3936288/'
        # Browser-like UA so Douban does not reject the plain requests client.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
        }
        # Poster images are saved under <cwd>/movie_posters.
        self.img_folder = os.path.join(os.getcwd(), 'movie_posters')
        self.movies: "List[Movie]" = []  # results accumulated across pages

    def scrape_movies(self, max_pages: int = 10) -> "List[Movie]":
        """Crawl up to ``max_pages`` list pages and return the parsed movies.

        Results are also appended to ``self.movies``. Crawling stops early
        when a page yields no entries (end of list) or when a request or
        parsing error occurs.

        :param max_pages: maximum number of list pages to fetch
        :return: the accumulated list of Movie objects
        """
        print("🚀 开始使用requests+BeautifulSoup爬取豆瓣电影Top250信息...")
        print(f"📄 计划爬取页数: {max_pages}")

        current_page = 1
        current_rank = 1

        while current_page <= max_pages:
            try:
                # Douban paginates with ?start=<offset>, 25 entries per page.
                if current_page == 1:
                    page_url = self.base_url
                else:
                    page_url = f"{self.base_url}?start={(current_page - 1) * 25}"

                print(f"📄 爬取第 {current_page} 页: {page_url}")

                # timeout= keeps a stalled connection from hanging the crawl;
                # raise_for_status() surfaces HTTP errors to the handler below.
                response = requests.get(page_url, headers=self.headers, timeout=15)
                response.raise_for_status()
                response.encoding = 'utf-8'

                soup = BeautifulSoup(response.text, 'html.parser')

                # Each list entry lives in <div class="bd doulist-subject">.
                movie_blocks = soup.find_all('div', class_='bd doulist-subject')
                print(f"📊 第 {current_page} 页找到 {len(movie_blocks)} 部电影")

                # An empty page means we walked past the last page.
                if not movie_blocks:
                    print(f"⚠️ 第 {current_page} 页没有找到电影信息，可能已到最后一页")
                    break

                for movie_block in movie_blocks:
                    movie = self._extract_movie_info(movie_block, current_rank)
                    if movie:
                        self.movies.append(movie)
                        current_rank += 1

                print(f"✅ 第 {current_page} 页处理完成")
                current_page += 1

                # Polite delay between page requests to avoid hammering the site.
                time.sleep(2)

            except Exception as e:
                print(f"❌ 处理第 {current_page} 页时出错: {e}")
                break

        print(f"🎉 requests+BeautifulSoup爬取完成，共获取 {len(self.movies)} 部电影信息")
        return self.movies

    def _extract_movie_info(self, movie_block, rank: int) -> "Optional[Movie]":
        """Parse one list entry into a Movie.

        :param movie_block: BeautifulSoup tag for one doulist-subject div
        :param rank: sequential rank to assign to this movie
        :return: the parsed Movie, or None if the entry cannot be parsed
        """
        try:
            movie = Movie(rank)

            # Title and detail-page link.
            title_element = movie_block.find('div', class_='title').find('a')
            movie.title = title_element.get_text().strip()
            movie.url = title_element.get('href') or ""

            # Poster URL; entries may lack a poster block or a src attribute,
            # so guard each step instead of letting the whole entry fail.
            post_div = movie_block.find('div', class_='post')
            img_element = post_div.find('img') if post_div else None
            movie.img_url = img_element.get('src', "") if img_element else ""

            # Numeric score, e.g. "9.7"; missing score -> 0.0.
            rating_element = movie_block.find('span', class_='rating_nums')
            movie.rating = float(rating_element.get_text().strip()) if rating_element else 0.0

            # Vote count is the last <span> inside the rating div (guarded: some
            # entries carry no rating block at all).
            rating_div = movie_block.find('div', class_='rating')
            rating_spans = rating_div.find_all('span') if rating_div else []
            movie.rating_count = rating_spans[-1].get_text().strip() if rating_spans else ""

            # Director / actors / genre / country / year from the abstract text.
            self._extract_movie_details(movie_block, movie)

            return movie

        except Exception as e:
            print(f"❌ 解析第{rank}部电影时出错: {e}")
            return None

    def _extract_movie_details(self, movie_block, movie: "Movie"):
        """Fill director/actors/genre/country/year from the abstract block.

        The abstract is a newline-separated text where each line is a
        "<label>: <value>" pair with Chinese labels.
        """
        abstract_element = movie_block.find('div', class_='abstract')

        if abstract_element:
            abstract_text = abstract_element.get_text().strip()
            lines = [line.strip() for line in abstract_text.split('\n') if line.strip()]

            for line in lines:
                if '导演:' in line:
                    movie.director = line.replace('导演:', '').strip()
                elif '主演:' in line:
                    movie.actors = line.replace('主演:', '').strip()
                elif '类型:' in line:
                    movie.genre = line.replace('类型:', '').strip()
                elif '制片国家/地区:' in line:
                    movie.country = line.replace('制片国家/地区:', '').strip()
                elif '年份:' in line:
                    movie.year = line.replace('年份:', '').strip()

    def download_images(self, movies: "Optional[List[Movie]]" = None) -> None:
        """Download the poster image for every movie that has an image URL.

        Sets ``movie.download_success`` (and ``img_local_path`` on success)
        on each processed movie.

        :param movies: movies to process; defaults to ``self.movies``
        """
        if movies is None:
            movies = self.movies

        print("\n🖼️  开始下载电影图片...")

        # Ensure the target folder exists (exist_ok guards the TOCTOU race).
        if not os.path.exists(self.img_folder):
            os.makedirs(self.img_folder, exist_ok=True)
            print(f"📁 创建图片文件夹: {self.img_folder}")

        for movie in movies:
            if movie.img_url:
                success = self._download_single_image(movie)
                movie.download_success = success
                print(f"{'✅' if success else '❌'} {movie.title} - 图片下载{'成功' if success else '失败'}")
            else:
                print(f"⚠️ {movie.title} - 无图片URL")

    def _download_single_image(self, movie: "Movie") -> bool:
        """Fetch one movie's poster to ``<rank>_<title>.jpg`` in img_folder.

        :return: True on success (also records ``movie.img_local_path``)
        """
        try:
            # Build a filesystem-safe file name from rank + cleaned title.
            clean_title = self._clean_filename(movie.title)
            img_filename = f"{movie.rank:02d}_{clean_title}.jpg"
            img_path = os.path.join(self.img_folder, img_filename)

            response = requests.get(movie.img_url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                with open(img_path, 'wb') as f:
                    f.write(response.content)
                movie.img_local_path = img_path
                return True
            else:
                print(f"❌ 图片下载失败，状态码: {response.status_code}")
                return False

        except Exception as e:
            print(f"❌ 图片下载出错: {e}")
            return False

    def _clean_filename(self, filename: str) -> str:
        """Strip characters Windows forbids in file names and cap the length.

        :param filename: raw title text
        :return: sanitized name, at most 50 characters
        """
        invalid_chars = r'[<>:"/\\|?*]'
        cleaned = re.sub(invalid_chars, '', filename)

        # Keep paths comfortably under filesystem name-length limits.
        if len(cleaned) > 50:
            cleaned = cleaned[:50]

        return cleaned

    def export_to_excel(self, movies: "Optional[List[Movie]]" = None,
                        filename: Optional[str] = None) -> str:
        """Write the movie data to an .xls workbook via xlwt.

        :param movies: movies to export; defaults to ``self.movies``
        :param filename: output file name; defaults to a timestamped name
        :return: absolute path of the written file, or "" on failure
        """
        if movies is None:
            movies = self.movies

        if not movies:
            print("❌ 没有电影数据可导出")
            return ""

        if filename is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"豆瓣电影Top250_{timestamp}.xls"

        try:
            workbook = xlwt.Workbook(encoding='utf-8')
            worksheet = workbook.add_sheet('豆瓣电影Top250')

            # Column order matches the keys produced by Movie.to_dict().
            headers = ['排名', '电影标题', '电影链接', '图片URL', '评分', '评价人数',
                      '导演', '主演', '类型', '制片国家/地区', '年份', '本地图片路径', '图片下载状态']

            for col, header in enumerate(headers):
                worksheet.write(0, col, header)

            # Data rows start at row 1, directly under the header row.
            for row, movie in enumerate(movies, 1):
                movie_data = movie.to_dict()
                for col, header in enumerate(headers):
                    worksheet.write(row, col, str(movie_data[header]))

            # Save next to the current working directory.
            excel_path = os.path.join(os.getcwd(), filename)
            workbook.save(excel_path)
            print(f"\n📄 数据已导出到: {excel_path}")
            return excel_path

        except Exception as e:
            print(f"❌ Excel导出失败: {e}")
            return ""

    def print_movies_info(self, movies: "Optional[List[Movie]]" = None) -> None:
        """Print a formatted summary of each movie to stdout.

        :param movies: movies to display; defaults to ``self.movies``
        """
        if movies is None:
            movies = self.movies

        if not movies:
            print("❌ 没有电影数据可显示")
            return

        print("\n" + "=" * 80)
        print("📊 豆瓣电影Top250信息汇总")
        print("=" * 80)

        for movie in movies:
            print(movie)
            print("-" * 80)


def main():
    """Entry point: drive scraping, image download, reporting, and export."""
    print("🎬 豆瓣电影Top250爬虫程序 (requests+BeautifulSoup版本)")
    print("=" * 50)

    # Ask how many pages to scrape; fall back to 3 on bad or absent input.
    # Catch only the expected interactive-input failures — a bare except
    # here would also swallow KeyboardInterrupt/SystemExit.
    try:
        max_pages = int(input("请输入要爬取的页数 (默认为3): ").strip() or "3")
    except (ValueError, EOFError):
        max_pages = 3

    print(f"🔧 爬取设置: {max_pages}页")

    # 1. Create the spider instance.
    spider = DoubanSpider()

    # 2. Collect the movie data.
    movies = spider.scrape_movies(max_pages=max_pages)

    if not movies:
        print("❌ 未获取到电影数据，程序结束")
        return

    # 3. Download poster images.
    spider.download_images()

    # 4. Print a human-readable summary.
    spider.print_movies_info()

    # 5. Export to Excel.
    excel_file = spider.export_to_excel()

    # 6. Final statistics.
    success_count = sum(1 for movie in movies if movie.download_success)
    print(f"\n📊 爬取统计:")
    print(f"📽️  总电影数: {len(movies)}")
    print(f"🖼️  图片下载成功: {success_count}/{len(movies)}")
    print(f"📄 Excel文件: {excel_file}")

    print("\n🎉 程序执行完成！")


# Standard script guard: run the crawl only when executed directly, not on import.
if __name__ == "__main__":
    main()