# 抓取猫眼电影 https://www.maoyan.com/films?showType=3
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import random
from lxml import etree
import time
import csv


def options():
    """Build Chrome options for headless, low-detection scraping.

    Returns:
        A configured ``selenium.webdriver.chrome.options.Options`` instance.
    """
    opts = Options()
    # headless + cert tolerance, and mask the automation fingerprint so the
    # site is less likely to block the scraper
    for flag in (
        '--headless',
        'ignore-certificate-errors',
        '--disable-blink-features=AutomationControlled',
    ):
        opts.add_argument(flag)
    # hides the "Chrome is being controlled by automated software" infobar
    opts.add_experimental_option('excludeSwitches', ['enable-automation'])
    return opts


def get_data(a, html=None):
    """Parse one Maoyan list page and append movie rows to *a*.

    Each appended row is ``[name, score, type, actors, release_date]``.

    Args:
        a: list that rows are appended to (mutated in place).
        html: optional page HTML to parse. Defaults to the current page of
            the module-level ``driver`` for backward compatibility with the
            existing caller.
    """
    if html is None:
        html = driver.page_source
    etree_html = etree.HTML(html)
    for data_info in etree_html.xpath('//dl/dd'):
        data_name_str = data_info.xpath('./div[2]/@title')[0]
        # rated movies render the score inside <i> tags; unrated ones put a
        # placeholder text directly in the div — decide before joining
        data_score = data_info.xpath('./div[3]/i/text()')
        if data_score:
            data_score_str = ''.join(data_score)
        else:
            data_score_str = data_info.xpath('./div[3]/text()')[0]
        data_type = data_info.xpath('./div[1]//div[@class="movie-hover-title"][2]/text()')
        data_type_str = "".join(data_type).replace("\n", '').replace(' ', '')
        data_actor = data_info.xpath('./div[1]//div[@class="movie-hover-title"][3]/text()')
        data_actor_str = "".join(data_actor).replace("\n", '').replace(' ', '')
        data_time = data_info.xpath('./div[1]//div[@class="movie-hover-info"]/div[4]/text()')
        data_time_str = "".join(data_time).replace("\n", '').strip()
        a.append([data_name_str, data_score_str, data_type_str,
                  data_actor_str, data_time_str])


def save_csv(a_list, filename='电影.csv'):
    """Write scraped movie rows to a CSV file with a Chinese header row.

    Args:
        a_list: iterable of ``[name, score, type, actors, release_date]`` rows.
        filename: output path; defaults to the original hard-coded name so
            existing callers are unaffected.
    """
    # newline="" stops the csv module from emitting blank lines on Windows
    with open(filename, 'w', encoding='utf-8', newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["电影名", "评分", "类型", "演员", "上映时间"])
        writer.writerows(a_list)


if __name__ == "__main__":
    pages = int(input("请问获取几页数据:"))
    # NOTE: the names `options` and `driver` are kept — get_data() reads
    # `driver` as a module-level global
    options = options()
    driver = webdriver.Chrome(options=options)
    driver.get('https://www.maoyan.com/films?showType=3')
    driver.maximize_window()
    time.sleep(3)
    collected = []
    for page_index in range(pages):
        get_data(collected)
        # scroll a randomized distance (1400-1600px) so the session looks
        # less bot-like
        scroll_y = random.randint(700, 800) * 2
        driver.execute_script(f'window.scrollTo(0,{scroll_y})')
        on_last_page = page_index == pages - 1
        if not on_last_page:
            time.sleep(1)
            # click the "next page" link (8th item in the pager)
            driver.find_element(By.XPATH, '//*[@id="app"]/div/div[2]/div[3]/ul/li[8]/a').click()
        time.sleep(2)
    save_csv(collected)
