# -*- coding: utf-8 -*-
import re
import time
import requests
from bs4 import BeautifulSoup
import pandas as pd
import logging
from tqdm import tqdm  # 进度条支持，可选安装
import os
# Configure root logging: timestamped INFO-level messages.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

# Pre-compiled regexes for the fields inside one Douban Top250 list item.
# Each is applied to the str() of a <div class="item"> node (see parse_movie_item).
REGEX_PATTERNS = {
    'link': re.compile(r'<a\s+href="(.*?)"'),  # movie detail-page URL
    'img_src': re.compile(r'<img.*?src="(.*?)"', re.S),  # poster image URL
    'title': re.compile(r'<span class="title">(.*?)</span>'),  # Chinese title, then (optionally) foreign title
    'rating': re.compile(r'<span class="rating_num".*?>(.*?)</span>'),  # score text, e.g. "9.7"
    'judge': re.compile(r'<span>(\d+)\s*人评价</span>'),  # number of raters
    'inq': re.compile(r'<span class="inq">(.*?)</span>'),  # one-line blurb
    'bd': re.compile(r'<p class="">(.*?)</p>', re.S)  # director/cast/year info block
}

# Browser-like request headers so requests look less like a bot.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Connection': 'keep-alive'
}


def get_movie_data(base_url, max_pages=10, delay=1.5):
    """Scrape up to *max_pages* Douban listing pages and collect movie dicts.

    Args:
        base_url: URL prefix; the page offset (page * 25) is appended to it.
        max_pages: number of 25-item listing pages to fetch.
        delay: seconds to sleep between pages to avoid getting the IP banned.

    Returns:
        A list of movie dicts accumulated across all successfully parsed pages.
    """
    collected = []

    for page_index in tqdm(range(max_pages), desc="爬取进度"):
        page_url = f"{base_url}{page_index * 25}"
        try:
            page_html = fetch_url(page_url)
            if not page_html:
                continue

            collected.extend(parse_page(page_html))

            # Throttle between pages so we don't trip the anti-crawler.
            time.sleep(delay)

        except Exception as exc:
            logging.error(f"处理页面 {page_index + 1} 时出错: {str(exc)}")

    return collected


def fetch_url(url, retries=3):
    """Fetch *url* and return the response body text, retrying on failure.

    Handles two failure modes per attempt: a transport/HTTP error (short
    sleep, retry) and an anti-crawler challenge page (longer sleep, retry).

    Returns:
        The page text on success, or None once all attempts are exhausted.
    """
    attempt = 0
    while attempt < retries:
        try:
            response = requests.get(url, headers=HEADERS, timeout=10)
            response.raise_for_status()  # surface HTTP 4xx/5xx as exceptions

            if "检测到有异常请求" in response.text:
                # Challenge page instead of content: back off before retrying.
                logging.warning("触发反爬机制，尝试等待...")
                time.sleep(10)
            else:
                return response.text

        except requests.exceptions.RequestException as exc:
            logging.warning(f"请求失败 ({attempt + 1}/{retries}): {str(exc)}")
            time.sleep(2)

        attempt += 1

    logging.error(f"无法获取URL: {url}")
    return None


def parse_page(html):
    """Parse one listing page's HTML and return the movie dicts found on it.

    Each <div class="item"> node is handed to parse_movie_item; a failure on
    one item is logged and skipped so the rest of the page still parses.
    """
    soup = BeautifulSoup(html, 'html.parser')
    results = []

    for node in soup.find_all('div', class_='item'):
        try:
            parsed = parse_movie_item(str(node))
        except Exception as exc:
            logging.error(f"解析电影条目时出错: {str(exc)}")
        else:
            if parsed:
                results.append(parsed)

    return results


def parse_movie_item(item_html):
    """Extract one movie's fields from the HTML of a single list item.

    Returns a dict with keys chinese_title, foreign_title, rating,
    judge_count, summary, details (always present, with placeholder values
    when a field is missing) plus link and img_src (only when matched).
    """
    movie = {}

    # Detail-page link and poster image: only stored when the regex matches.
    link_match = safe_regex_search(REGEX_PATTERNS['link'], item_html)
    if link_match:
        movie['link'] = link_match[0]

    img_match = safe_regex_search(REGEX_PATTERNS['img_src'], item_html)
    if img_match:
        movie['img_src'] = img_match[0]

    # Titles: the first <span class="title"> is the Chinese title; a second
    # one, when present, holds the foreign title prefixed with a slash.
    title_list = safe_regex_search(REGEX_PATTERNS['title'], item_html, all_matches=True)
    if title_list:
        movie['chinese_title'] = title_list[0]
        if len(title_list) > 1:
            movie['foreign_title'] = title_list[1].replace("/", "").strip()
        else:
            movie['foreign_title'] = "无外文名"
    else:
        movie['chinese_title'] = "无标题"
        movie['foreign_title'] = "无外文名"

    # Numeric fields default to zero when absent.
    rating_match = safe_regex_search(REGEX_PATTERNS['rating'], item_html)
    movie['rating'] = float(rating_match[0]) if rating_match else 0.0

    judge_match = safe_regex_search(REGEX_PATTERNS['judge'], item_html)
    movie['judge_count'] = int(judge_match[0]) if judge_match else 0

    # One-line blurb, with the Chinese full stop removed.
    inq_match = safe_regex_search(REGEX_PATTERNS['inq'], item_html)
    if inq_match:
        movie['summary'] = inq_match[0].replace("。", "").strip()
    else:
        movie['summary'] = "无简介"

    # Info block: drop <br> tags and slash separators, collapse whitespace.
    bd_match = safe_regex_search(REGEX_PATTERNS['bd'], item_html)
    if bd_match:
        cleaned = re.sub(r'<br\s*/?>', " ", bd_match[0])
        cleaned = re.sub(r'/\s*', " ", cleaned)
        movie['details'] = re.sub(r'\s+', " ", cleaned).strip()
    else:
        movie['details'] = "无详细信息"

    return movie


def safe_regex_search(pattern, text, all_matches=False):
    """Run a compiled regex against *text* without letting it raise.

    Args:
        pattern: a compiled re.Pattern.
        text: the string to scan.
        all_matches: when True, return pattern.findall(text) (a list,
            possibly empty); otherwise return the groups tuple of the
            first match.

    Returns:
        The findall list, the first match's groups tuple, or None when
        there is no match or the regex engine raised (logged as a warning).
    """
    try:
        if all_matches:
            return pattern.findall(text)
        first = pattern.search(text)
        if first is None:
            return None
        return first.groups()
    except Exception as exc:
        logging.warning(f"正则匹配出错: {str(exc)}")
        return None


def save_to_excel(data, filename):
    """Persist the scraped movie records to an Excel workbook.

    Args:
        data: list of movie dicts as produced by parse_movie_item.
        filename: destination .xlsx path.

    Returns:
        True when the file was written; False when there is no data or
        writing failed (the error is logged, not raised).
    """
    if not data:
        logging.warning("没有数据可保存")
        return False  # consistent bool return (was an implicit None)

    try:
        df = pd.DataFrame(data)
        # Fix: use reindex instead of df[[...]] — parse_movie_item only sets
        # 'link'/'img_src' when the regex matched, and column selection would
        # raise KeyError (failing the whole save) if any column is absent.
        # reindex keeps the fixed column order and fills missing ones with NaN.
        df = df.reindex(columns=[
            'chinese_title', 'foreign_title', 'rating', 'judge_count',
            'summary', 'details', 'link', 'img_src'
        ])

        df.to_excel(filename, index=False, engine='openpyxl')
        # Bug fix: the success log previously printed the literal placeholder
        # "(unknown)" instead of interpolating the actual filename.
        logging.info(f"成功保存数据到 {filename}, 共 {len(df)} 条记录")
        return True
    except Exception as e:
        logging.error(f"保存Excel失败: {str(e)}")
        return False


def main():
    """Entry point: scrape the Douban Top250 list and export it to Excel."""
    listing_url = "https://movie.douban.com/top250?start="
    output_file = "TempData/豆瓣电影Top250.xlsx"
    # Make sure the output directory exists before writing.
    os.makedirs(os.path.dirname(output_file), exist_ok=True)

    logging.info("开始爬取豆瓣电影Top250数据...")
    results = get_movie_data(listing_url, max_pages=10, delay=1.5)

    if not results:
        logging.error("未能获取任何电影数据")
    else:
        save_to_excel(results, output_file)

    logging.info("爬取完成")


# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()