# 导入所需的库 (imports, deduplicated and grouped: stdlib / third-party)
import time

import openpyxl
import pandas as pd
from lxml import etree
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait


# HTTP request headers (browser UA + douban session cookie).
# NOTE(review): not referenced anywhere in this script — presumably kept for a
# requests-based fallback or a later Selenium CDP call; confirm before removing.
# The embedded Cookie is a personal session token and should not be committed.
headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0',
            'Cookie': 'll="118160"; bid=n7U3zmjfTuc; _pk_id.100001.4cf6=9b9860ce562eaa21.1684325777.; __yadk_uid=mDZKEvyucaB3aIAXY3UYJ20bY2u4VmmT; __gads=ID=0360a756bb0824f1-22ff9cead3e0003c:T=1684325778:RT=1684325778:S=ALNI_MYt2YKDj9L2UsC77oMdsi0_92YoaQ; __gpi=UID=00000c0759bfdce3:T=1684325778:RT=1684325778:S=ALNI_MaRLmviQevXBNc1_NcF7mMgqFNPmQ; _vwo_uuid_v2=D82E2781BA7514D9BEFB0A88699D5812E|cabb83a8f1479a783ff34d84235bf4eb; ct=y; ap_v=0,6.0; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1704694064%2C%22https%3A%2F%2Fcn.bing.com%2F%22%5D; _pk_ses.100001.4cf6=1; __utma=30149280.940552568.1684325777.1704649762.1704694064.13; __utmb=30149280.0.10.1704694064; __utmc=30149280; __utmz=30149280.1704694064.13.7.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utma=223695111.79828918.1684325777.1704649762.1704694064.13; __utmb=223695111.0.10.1704694064; __utmc=223695111; __utmz=223695111.1704694064.13.7.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/'
}
def login(chrome_option):
    """Log in to douban.com with a fresh Chrome driver and return it.

    Opens the home page, switches into the login iframe, fills in the
    password form, then sleeps 30s so a human can solve any captcha.

    FIX: the Selenium 3 ``find_element_by_*`` helpers were removed in
    Selenium 4 — use ``find_element(By.*, ...)`` instead.
    """
    # NOTE(review): credentials are hard-coded; move them to env vars/config.
    # They are now strings, the type send_keys is documented to accept.
    username = "13416261936"
    password = "1595275518"
    driver = webdriver.Chrome(options=chrome_option)
    driver.get('https://www.douban.com/')
    # The login form lives inside an iframe; switch into it first.
    iframe = driver.find_element(By.TAG_NAME, "iframe")
    driver.switch_to.frame(iframe)
    # Switch from SMS-code tab to account/password tab.
    driver.find_element(By.CLASS_NAME, 'account-tab-account').click()
    driver.find_element(By.ID, 'username').send_keys(username)
    driver.find_element(By.ID, 'password').send_keys(password)
    driver.find_element(By.CLASS_NAME, 'btn-account').click()
    time.sleep(30)  # grace period for captcha / redirect to finish
    return driver

def click_1(i):
    """Open the "类型" (genre) dropdown and click its *i*-th entry.

    Relies on the module-level ``driver``. Prints a message instead of
    raising once *i* runs past the last menu entry.

    FIX: replaced the removed Selenium 3 ``find_element_by_xpath`` with
    ``find_element(By.XPATH, ...)``; dropped the dead ``menu_jc = None``.
    """
    dropdown = driver.find_element(By.XPATH, '//*[@id="app"]/div/div[1]/div/div[1]/div[1]')
    dropdown.click()
    time.sleep(1)  # let the dropdown animation finish
    try:
        entry = driver.find_element(
            By.XPATH,
            f'//*[@id="app"]/div/div[1]/div/div[1]/div[1]/div/div[2]/div/div/ul/li[{i}]/span')
        entry.click()
    except NoSuchElementException:
        print("找完了所有类型的影片")

def click_more_sample():
    """Click the "加载更多" (load more) button once, then wait 2s.

    Simple variant of :func:`click_more` with no explicit waiting/retry;
    uses the module-level ``driver``.

    FIX: replaced the removed Selenium 3 ``find_element_by_xpath`` with
    ``find_element(By.XPATH, ...)``.
    """
    load_more = driver.find_element(By.XPATH, '//*[@id="app"]/div/div[2]/div/button')
    load_more.click()
    time.sleep(2)  # allow the newly loaded cards to render

def click_more():
    """Click the "加载更多" (load more) button up to 30 times.

    Waits up to 2s for the button to become clickable before each click,
    and stops as soon as the button no longer appears. Uses the
    module-level ``driver``.

    FIX: ``WebDriverWait.until`` raises ``TimeoutException`` when the
    element never becomes clickable — it does NOT raise
    ``NoSuchElementException``, so the original "button gone → break"
    branch was dead code and the generic handler printed a spurious
    error instead. Catch ``TimeoutException`` as the normal end signal.
    """
    for _ in range(30):
        try:
            button = WebDriverWait(driver, 2).until(
                EC.element_to_be_clickable((By.XPATH, '//*[@id="app"]/div/div[2]/div/button')))
            button.click()
            time.sleep(1)  # let the page append the next batch of results
        except TimeoutException:
            # Button is gone (or never clickable): everything is loaded.
            break
        except Exception as e:
            # Anything else (stale element, click intercepted, ...) — report and stop.
            print(f"An error occurred: {e}")
            break
    print("加载完成")

def spider_labels(data_label):
    """Parse year, country and genre tags out of douban label strings.

    Each entry of *data_label* looks like
    ``"2023 / 法国 / 剧情 喜剧 / 昆汀·杜皮约 / 拉斐尔·奎纳德 皮奥·马麦"``.
    Splitting on '/' and then on spaces yields tokens interleaved with
    empty strings; the count of empty strings seen so far tells which
    field a token belongs to (2 blanks → country, then non-2 → genres,
    5+ blanks → directors/actors, which we skip).

    Returns a dict with parallel lists under "year", "country", "labels".
    """
    item = {"year": [], "country": [], "labels": []}
    for raw in data_label:
        # Tokenize: split on '/' first, then on spaces where present.
        tokens = []
        for segment in raw.split('/'):
            if ' ' in segment:
                tokens.extend(segment.split(' '))
            else:
                tokens.append(segment)
        # e.g. ['2023', '', '', '法国', '', '', '剧情', '喜剧', '', '', ...]
        item["year"].append(int(tokens[0]))
        blanks = 0          # empty tokens seen; every 2 marks a field boundary
        have_country = 0    # 1 once the country token has been captured
        genres = []
        for tok in tokens:
            if not tok:
                blanks += 1
            elif blanks == 2 and not have_country:
                have_country = 1
                item["country"].append(tok)
            elif blanks != 2 and have_country:
                genres.append(tok)
            if blanks >= 5:  # past the genre field — stop scanning
                break
        item["labels"].append(genres)
    return item

def spider_comments(driver, website):
    """Fetch the first hot comment of every movie URL in *website*.

    Navigates *driver* to each URL, parses the rendered page with lxml,
    and collects the text of the first entry under #hot-comments.
    Returns a list with one (possibly empty) list of text nodes per URL.
    """
    collected = []
    for web in website:
        driver.get(web)
        # Parse the fully rendered page source with lxml.
        tree = etree.HTML(driver.page_source)
        name = tree.xpath('//*[@id="content"]/h1/span[1]/text()')
        comments = tree.xpath('//*[@id="hot-comments"]/div[1]/div/p/span/text()')
        print(f"正在爬取{name},网址为{web}")
        collected.append(comments)
    return collected

def spider_main(driver, driver_comment):
    """Scrape the movie list currently shown in *driver*.

    Parses the explore-page <ul> of movie cards and extracts, as parallel
    lists: names, years, countries, genre labels, detail-page URLs, the
    first hot comment of each detail page (fetched via *driver_comment*),
    cover image URLs, and ratings.

    Returns a dict of those lists (empty dict if the list <ul> was not
    found). Note the page normally has a single matching <ul>; if there
    were several, only the last one's data would be returned, as before.

    FIX: ``item`` was unbound when the xpath matched nothing (NameError
    on ``return``); removed the unused ``items`` dict.
    """
    html = driver.page_source
    data_list = etree.HTML(html).xpath('//*[@id="app"]/div/div[2]/ul')
    item = {}  # stays empty (instead of raising NameError) if no <ul> matched
    for data in data_list:
        item = {}
        item["name"] = data.xpath("./li/a/div/div[2]/div/div[1]/span/text()")
        # Raw label strings: "2023 / 法国 / 剧情 喜剧 / 导演 / 演员"
        data_label = data.xpath("./li/a/div/div[2]/div/div[1]/div/text()")
        item_part = spider_labels(data_label)
        item["year"] = item_part["year"]
        item["country"] = item_part["country"]
        item["labels"] = item_part["labels"]
        website = item["website"] = data.xpath("./li/a/@href")  # list of detail URLs
        item["comment"] = spider_comments(driver_comment, website)
        item["img"] = data.xpath('./li/a/div/div[1]/div/img[@class="drc-cover-pic"]/@src')
        item["rate"] = data.xpath('./li/a/div/div[2]/div/div[2]/span[2]/text()')
    return item




if __name__ == '__main__':
    # Headless Chrome with images disabled to speed up scraping.
    option = webdriver.ChromeOptions()
    option.add_argument('--headless')
    option.add_argument('blink-settings=imagesEnabled=false')
    driver = webdriver.Chrome(options=option)
    # Second driver so detail pages can be fetched without losing the list page.
    driver_comment = webdriver.Chrome(options=option)
    url = 'https://movie.douban.com/explore'
    driver.get(url)
    time.sleep(1)  # wait for the page's JS to render the list
    item = {}
    # FIX below: the removed Selenium 3 ``find_element_by_xpath`` is replaced
    # with ``find_element(By.XPATH, ...)``; dead ``menu_jc = None`` dropped.
    for i in range(22, 24):  # 从地区的华语点到日本等等 (region entries 22..23)
        # Open the "地区" (region) dropdown.
        menu_jc = driver.find_element(By.XPATH, '//*[@id="app"]/div/div[1]/div/div[1]/div[2]/div')
        menu_jc.click()
        time.sleep(1)  # let the dropdown open
        try:
            menu_jc_another = driver.find_element(
                By.XPATH,
                f'//*[@id="app"]/div/div[1]/div/div[1]/div[2]/div/div[2]/div/div/ul/li[{i}]/span')
            menu_jc_another.click()
            print(f"Clicked on menu item {i}")
            # Keep clicking "加载更多" until no more results load.
            click_more()
            item.update(spider_main(driver, driver_comment))
            time.sleep(2)
            # One Excel file per region index.
            df = pd.DataFrame(item)
            print(df)
            df.to_excel(f'movie_spider_result{i}.xlsx', index=False)
        except NoSuchElementException:
            print("找完了所有类型的影片")
    driver_comment.quit()
    driver.quit()






