# -*- coding: utf-8 -*-

import base64
import json
import os
import time
from urllib.parse import quote

import pandas as pd
import requests
from loguru import logger
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tqdm import tqdm


def scrapy_one_img(img_name):
    """Scrape the first Google-Images thumbnail for ``img_name``.

    Launches a Chrome instance with basic automation-detection evasion,
    searches Google Images for the given name, and decodes the first
    result's inline base64 thumbnail.

    Parameters
    ----------
    img_name : str
        Search query (a movie title from the MovieLens catalog).

    Returns
    -------
    dict
        ``{"status": 1, "img": <bytes>}`` on success;
        ``{"status": 0, "img": None}`` when no usable result was found.
    """
    ## Configure selenium to evade naive webdriver detection, then start Chrome.
    url_format = "https://www.google.com/search?q={}&sca_esv=1e612c396070fea5&rlz=1C1FHFK_zh-CNHK1087HK1087&udm=2&biw=1536&bih=730&sxsrf=ACQVn08YrrEpQqPjytESUA0ddHnRXf4UTQ%3A1710426920882&ei=KAvzZYe0Nb2k2roP1NOrsAQ&ved=0ahUKEwiH6KHo_POEAxU9klYBHdTpCkYQ4dUDCBA&uact=5&oq=And+God+Created+Woman+%28Et+Dieu%E2%80%A6Cr%C3%A9a+la+Femme%29+%281956%29&gs_lp=Egxnd3Mtd2l6LXNlcnAiN0FuZCBHb2QgQ3JlYXRlZCBXb21hbiAoRXQgRGlldeKApkNyw6lhIGxhIEZlbW1lKSAoMTk1NilI6wdQAFgAcAF4AJABAJgBAKABAKoBALgBA8gBAJgCAKACAJgDAIgGAZIHAKAHAA&sclient=gws-wiz-serp"
    options = ChromeOptions()
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    options.add_experimental_option('useAutomationExtension', False)
    browser = webdriver.Chrome(options=options)
    browser.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
        'source': 'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'
    })

    try:
        ## Build the search URL. BUG FIX: URL-encode the query so titles
        ## containing '&', '+', '#', spaces, etc. don't corrupt the query string.
        url = url_format.format(quote(img_name))
        ## Fetch the page and wait for the results container to appear.
        browser.get(url)
        wait = WebDriverWait(browser, 10)
        wait.until(EC.presence_of_element_located((By.ID, 'search')))
        logger.info(browser.current_url)

        time.sleep(1)
        # #rso > div > div > div.wH6SXe.u32vCb
        # #rso > div > div > div.wH6SXe.u32vCb > div > div > div:nth-child(1)
        movie_div = browser.find_elements(By.CSS_SELECTOR, "#rso .wH6SXe div div")
        # BUG FIX: debug print replaced with a proper log call.
        logger.debug(f"found {len(movie_div)} result container(s)")

        if not movie_div:
            logger.warning("No movies found")
            return {"status": 0, "img": None}
        time.sleep(0.5)
        movie_detail = movie_div[0]
        img = movie_detail.find_element(By.CSS_SELECTOR, ".czzyk img")
        image_base64 = img.get_attribute('src')
        # Thumbnails are normally inline data URIs:
        #   "data:image/jpeg;base64,<payload>"
        # BUG FIX: guard before splitting — a plain https:// src (or a missing
        # attribute) used to raise ValueError/AttributeError here.
        if not image_base64 or "," not in image_base64:
            logger.warning("No movies found")
            return {"status": 0, "img": None}
        _image_head, image_context = image_base64.split(",", 1)
        image_data = base64.b64decode(image_context)
        logger.info(f"{img_name}已爬取完成")
        return {"status": 1, "img": image_data}
    finally:
        # BUG FIX: the original leaked the browser on the success path (and on
        # any exception). quit() always runs and, unlike close(), also tears
        # down the chromedriver process.
        browser.quit()


if __name__ == "__main__":
    ## movies.dat uses '::' as its separator; multi-character separators
    ## require the python parsing engine (the C engine would warn and fall
    ## back anyway).
    df = pd.read_csv("../Data/ml-1m/movies.dat", sep='::', header=None,
                     engine='python', encoding="ISO-8859-1")
    df.columns = ['mid', 'movie_name', 'classes']
    ## Map movie id -> title, keeping the first title seen for duplicate ids.
    sid2movie_map = {}
    for _, row in df.iterrows():
        # Label-based access: positional row[0] on a labeled Series is
        # deprecated in modern pandas.
        sid2movie_map.setdefault(row['mid'], row['movie_name'])
    # print(sid2movie_map)

    ## Movie id to fetch.
    movie_id = 3942
    # BUG FIX: validate the id BEFORE using it as a key — the original logged
    # sid2movie_map[movie_id] first, so an unknown id raised KeyError instead
    # of the intended error below.
    if movie_id not in sid2movie_map:
        raise Exception("输入id错误！")
    logger.info(f"正在抓取\t{sid2movie_map[movie_id]}")

    res = scrapy_one_img(sid2movie_map[movie_id])
    if res["status"] == 1:
        with open(f"{movie_id}.jpg", 'wb') as f:
            f.write(res["img"])
    else:
        logger.warning("爬取失败")
