# -*- coding:utf-8 -*-
# @Time:2024/4/19 21:01
# @Author:miuzg
# @FileName:new test2.py
# @Software:PyCharm

import requests
import random
from bs4 import BeautifulSoup
import time  # provides sleep() for polite delays between requests
from openpyxl import Workbook

# Pool of browser User-Agent strings. One is chosen at random for every
# request so the crawler does not present a single, easily-blocked fingerprint.
user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/108.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:108.0) Gecko/20100101 Firefox/108.0',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 16_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Mobile/15E148 Safari/604.1',
    'Mozilla/5.0 (Linux; Android 10; SM-G975F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Mobile Safari/537.36',
    # Add more User-Agent strings as needed — the more (and the more recent) the better.
]

# Raw Cookie header copied from a logged-in browser session on douban.com.
# Replace this with your own cookie string — session cookies expire.
cookie_string_raw = 'bid=b5NJhx1z7Vs; ap_v=0,6.0; __utmc=30149280; dbcl2="286549753:I0ktw/ZsCWk"; ck=p-Dt; push_noty_num=0; push_doumail_num=0; __utma=30149280.1191208119.1745491741.1745491741.1745496910.2; __utmb=30149280.0.10.1745496910; __utmz=30149280.1745496910.2.2.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/'


# Helper: turn a raw browser Cookie header into the dict form requests expects.
def parse_cookie_string(cookie_string):
    """Parse a raw ``Cookie`` header string into a ``{name: value}`` dict.

    Splits on ``';'`` (not ``'; '`` as before), so it also handles cookies
    separated without a space and ignores empty fragments such as a trailing
    semicolon — the old version produced a spurious ``{'': ''}`` entry for
    those. Only the first ``'='`` splits name from value, so values
    containing ``'='`` (e.g. dbcl2's quoted token) survive intact.

    Returns an empty dict if parsing fails for any reason.
    """
    cookie_dict = {}
    try:
        for cookie in cookie_string.strip().split(';'):
            cookie = cookie.strip()
            if not cookie:
                # Skip empty fragments from trailing/duplicated semicolons.
                continue
            if '=' in cookie:
                name, value = cookie.split('=', 1)
                cookie_dict[name] = value
            else:
                # Flag-style cookie with no value.
                cookie_dict[cookie] = ''
    except Exception as e:
        print(f"解析 Cookie 字符串时出错: {e}")
        return {}
    return cookie_dict


# Convert the raw cookie string into the dict form that requests' `cookies=`
# parameter expects.
cookies_dict = parse_cookie_string(cookie_string_raw)  # 使用 parse_cookie_string 将原始字符串转换为字典

# Each Top 250 page shows 25 movies; the page offset is appended to this URL.
base_url = 'https://movie.douban.com/top250?start='

# Prepare the Excel workbook and write the header row.
head_insert = ['标题', '图片链接', '导演', '主演','引言','其他信息']
wb = Workbook()
ws = wb.active
ws.title = '豆瓣top250'

ws.append(head_insert)


# Crawl loop. The Top 250 list spans exactly 10 pages (start = 0..225, step
# 25). The original bound of 275 requested an 11th, empty page whose HTML has
# no ol.grid_view, which crashed the parser with an AttributeError — fixed.
for i in range(0, 250, 25):
    url = base_url + str(i)

    # Rotate the User-Agent on every request to look less like a bot.
    current_user_agent = random.choice(user_agents)

    headers = {
        'User-Agent': current_user_agent,
        'Referer': 'https://movie.douban.com/top250?start=0&filter=',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'
    }

    print(f"正在请求 URL: {url}")

    try:
        # Pass the parsed cookie dict so the request is made as a logged-in user.
        response = requests.get(url=url, headers=headers, cookies=cookies_dict, timeout=15)
        response.raise_for_status()

        print(f"请求成功: {response.status_code}")

        soup = BeautifulSoup(response.text, 'lxml')

        movie_list = soup.find('ol', class_='grid_view')
        if movie_list is None:
            # Layout changed or an anti-bot page was served: skip this page
            # instead of crashing on a NoneType dereference (the original
            # chained .find_all onto this unchecked).
            print(f"页面 {url} 中未找到电影列表，跳过。")
            time.sleep(random.uniform(5, 10))
            continue

        for info in movie_list.find_all('li'):
            pic_src = info.find('div', class_='pic').find('a').find('img').get('src')
            print('图片为:', pic_src)
            title = info.find('div', class_='info').find('div', class_='hd').find('a').find('span', class_='title').text
            print('标题为:', title)
            p_tag = info.find('div', class_='info').find('div', class_='bd').find('p')

            # Full text of the <p> tag, stripped of surrounding whitespace.
            p_text = p_tag.text.strip()

            # Split the director part from the cast part.
            parts = p_text.split('主演:')

            # Per-movie defaults. BUG FIX: `quote` is now reset here too — the
            # original left it undefined on the first movie without a quote
            # (NameError at the ws.append row below) and, worse, silently
            # carried the PREVIOUS movie's quote into the Excel row for any
            # later movie that had none.
            director = '无'
            actor = '无'
            year_country_genre = '无'
            quote = '无'

            if len(parts) > 1:
                # Layout: "导演: X ... 主演: Y\n year / country / genre"
                director_part = parts[0].strip()
                actor_lines = parts[1].strip().splitlines()

                if '导演:' in director_part:
                    # Remove only the first "导演:" prefix.
                    director = director_part.replace('导演:', '', 1).strip()
                actor = actor_lines[0].strip()
                if len(actor_lines) > 1:
                    # Guard: some entries lack the second line (was an
                    # unchecked index in the original).
                    year_country_genre = actor_lines[1].strip()

                print('导演为:', director)
                print('主演为:', actor)
                print('其他信息为:', year_country_genre)
            else:
                # split() always returns at least one element, so this branch
                # covers every "no 主演: marker" entry.
                print('不存在主演')
                director_lines = parts[0].splitlines()
                director_part = director_lines[0].strip()
                if len(director_lines) > 1:
                    year_country_genre = director_lines[1].strip()

                if '导演:' in director_part:
                    director = director_part.replace('导演:', '', 1).strip()

                    print('导演为:', director)
                    print('不存在主演')
                    print('其他信息为:', year_country_genre)

            # The quote is optional. Catch only AttributeError (the NoneType
            # dereference when p.quote is absent) rather than a bare except
            # that would also swallow KeyboardInterrupt and real bugs.
            try:
                quote = info.find('div', class_='bd').find('p', class_='quote').find('span').text.strip()
                print('引言为:', quote)
            except AttributeError:
                print('不存在引言')

            try:
                rating = info.find('div', class_='bd').find('div').find('span', class_='rating_num').text.strip()
                print('评分为:', rating)
            except AttributeError:
                print('不存在评分')

            print("爬取完成。")

            # Append one row per movie to the worksheet.
            movie_info = [title, pic_src, director, actor, quote, year_country_genre]

            ws.append(movie_info)
            print('写入成功')
            print('*' * 60)

    except requests.exceptions.RequestException as e:
        print(f"请求失败 for {url}: {e}")
        if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 403:
            # Likely blocked by anti-scraping; back off longer before retrying
            # the next page.
            print("收到 403 Forbidden 错误，可能被反爬机制阻止。")
            time.sleep(random.uniform(10, 20))
            continue
        else:
            time.sleep(random.uniform(5, 10))
            continue

    # Polite random delay between successive page requests.
    sleep_time = random.uniform(3, 7)
    print(f"等待 {sleep_time:.2f} 秒...")
    time.sleep(sleep_time)


# Persist the workbook to disk once all pages have been processed.
print('全部爬虫流程结束，正在写入工作簿...')

wb.save('豆瓣top250.xlsx')
print('\n工作簿已保存为 豆瓣top250.xlsx')