import time

import os
import requests
import pandas as pd
from bs4 import BeautifulSoup
import random

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Request headers; a minimal custom user-agent string -- presumably enough for
# Douban not to reject plain requests; confirm against current site behavior.
headers = {'user-agent': 'my-app/0.0.1'}
# Module-level accumulator of raw movie dicts collected by get_page().
movie_list = []


# ... (rest of the code unchanged)

def login_douban(username, password):
    """Log in to Douban with Selenium and return the session cookies.

    :param username: account name typed into the login form.
    :param password: account password typed into the login form.
    :return: list of cookie dicts as produced by ``driver.get_cookies()``.
    :raises selenium.common.exceptions.TimeoutException: if any wait expires.
    """
    options = webdriver.ChromeOptions()  # or FirefoxOptions(), depending on your browser
    # options.add_argument("--headless")  # uncomment for headless mode
    driver = webdriver.Chrome(options=options)  # or webdriver.Firefox(options=options)
    # try/finally so the browser is always closed -- the original leaked the
    # driver process whenever a wait timed out or an element was missing.
    try:
        driver.get("https://accounts.douban.com/passport/login")

        # Wait for the password-login tab to appear, then click to switch to it.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.XPATH, '//li[@class="account-tab-account"]'))
        ).click()

        # Locate the username and password inputs and fill them in.
        username_input = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, "username"))
        )
        password_input = driver.find_element(By.ID, "password")  # password field
        username_input.send_keys(username)
        password_input.send_keys(password)

        # Submit the login form: find the login button and click it.
        login_button = driver.find_element(By.CSS_SELECTOR, 'div.account-form-field-submit a')
        login_button.click()

        # Wait until we are redirected to the home page, i.e. login succeeded
        # (other success conditions could be used here).
        WebDriverWait(driver, 10).until(EC.url_contains("www.douban.com/"))

        # Collect the cookies of the authenticated session.
        cookies = driver.get_cookies()
        print(cookies)
        return cookies
    finally:
        driver.quit()  # close the browser even on failure


def get_list(soup_list):
    """Extract the ``.string`` content of each parsed element.

    :param soup_list: iterable of BeautifulSoup tags (anything exposing a
        ``.string`` attribute).
    :return: list of the elements' string contents, in input order.
    """
    # Comprehension instead of an append loop; the original also shadowed the
    # builtin ``list`` by using it as the accumulator name.
    return [ele.string for ele in soup_list]


def save_csv(movies):
    """Append a batch of movie dicts to a fixed Excel workbook.

    NOTE(review): despite the name, this writes .xlsx via pandas/openpyxl,
    not CSV.

    :param movies: list of dicts, one per movie, as produced by get_info().
    """
    print("movies:\n", movies)
    file_path = "../doc/datasource/C8-8.3-数据采集.xlsx"

    sheet_name = "Sheet1"  # name of the sheet to write into

    # Check whether the workbook already exists.
    if os.path.exists(file_path):
        # File exists: read the rows already stored there.
        try:
            existing_data = pd.read_excel(file_path, sheet_name=sheet_name, engine='openpyxl')
        except ValueError:  # sheet does not exist yet -> start from empty
            existing_data = pd.DataFrame()
        # Append the new rows to the existing ones.
        new_data = pd.DataFrame(movies)
        combined_data = pd.concat([existing_data, new_data], ignore_index=True)
        # Write the merged frame back; 'overlay' rewrites the sheet in place
        # starting at the top-left cell.
        with pd.ExcelWriter(file_path, engine='openpyxl', mode='a', if_sheet_exists='overlay') as writer:
            combined_data.to_excel(writer, sheet_name=sheet_name, index=False)
    else:
        # File missing: create a new workbook and write the data directly.
        data = pd.DataFrame(movies)
        data.to_excel(file_path, sheet_name=sheet_name, index=False)
    print(f"数据已成功追加到 {file_path} 文件的 {sheet_name} sheet 中。")


def get_info(url):
    """Fetch one Douban movie detail page and scrape its key fields.

    :param url: absolute URL of a movie detail page.
    :return: dict of scraped fields; empty or partial if the page could not
        be parsed.
    """
    # Timeout so a stalled connection cannot hang the whole crawl.
    response = requests.get(url=url, headers=headers, timeout=10)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Accumulator for everything scraped from this page.
    movie_info = {}
    # Any missing element makes find() return None, so the attribute access
    # raises AttributeError; treat that as "movie page not available".
    try:
        movie_info['movie_name'] = soup.find(property="v:itemreviewed").string
        movie_info['director'] = soup.find(rel="v:directedBy").string
        movie_info['writer'] = get_list(soup.find_all(class_="attrs"))
        movie_info['actor_list'] = get_list(soup.find_all(rel="v:starring"))
        movie_info['type_list'] = get_list(soup.find_all(property="v:genre"))
        # ``string=`` is the current bs4 keyword; the deprecated ``text=``
        # alias was used inconsistently here before.
        movie_info['country'] = soup.find(string="制片国家/地区:").next_element  # country/region
        movie_info['language'] = soup.find(string='语言:').next_element  # element right after the label
        movie_info['release_date'] = soup.find(property="v:initialReleaseDate").string
        movie_info['run_time'] = soup.find(property="v:runtime").string
        movie_info['average_score'] = soup.find(property="v:average").string
        movie_info['number_of_people'] = soup.find(property="v:votes").string
        print("=========已获取的电影：", movie_info['movie_name'])
    except AttributeError:
        print("电影不存在")
    return movie_info


def get_page(page_link, tag):
    """Crawl the paginated Douban search API and persist each batch of movies.

    :param page_link: base URL of the JSON search endpoint.
    :param tag: tag value forwarded in the query string.
    """
    # # Log in first to obtain cookies, if needed:
    # # cookies = login_douban("<username>", "<password>")  # never hard-code real credentials
    # # cookies_dict = {c['name']: c['value'] for c in cookies}

    page = 80
    max_page = 2780
    while page <= max_page:
        # Build and fetch the current page of results.
        url = f"{page_link}?tags={tag}&start={page}"
        print("当前的url:\n", url)
        response = requests.get(url=url, headers=headers)
        # Parse the JSON payload safely. The original ran eval() on the raw
        # response body, which executes arbitrary code from the network;
        # response.json() also makes the "\\/" un-escaping hack unnecessary.
        movie_info = response.json()
        print("movie_info", movie_info)

        # Collect the detail-page links for this batch.
        movie_links = []
        for m in movie_info['data']:
            movie_list.append(m)  # m is a dict with the movie's summary data
            movie_links.append(m.get('url'))
        print("所有的电影链接数据:\n", movie_links)

        # Scrape each detail page with a polite random delay between requests.
        movie20_list = []
        for link in movie_links:
            time.sleep(random.uniform(1, 2))
            movie20_list.append(get_info(url=link))
        # Persist this batch to the workbook.
        save_csv(movie20_list)
        # Advance the ``start`` offset to the next page.
        page += 20


if __name__ == '__main__':
    # Entry point: crawl the search endpoint for results tagged "电影" (movie).
    get_page("https://movie.douban.com/j/new_search_subjects", tag="电影")
