import json
import os
import re

import requests
import scrapy
from bs4 import BeautifulSoup
from scrapy import Request
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

from tutorial.items import MovieItem


# JavaScript动态生成链接的爬取
def get_comment_url():
    url = "https://movie.douban.com/j/review/10601132/full"
    payload = {}
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Referer": "https://movie.douban.com/subject/30166972/reviews",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": "\"Chromium\";v=\"124\", \"Google Chrome\";v=\"124\", \"Not-A.Brand\";v=\"99\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
    }

    chrome_options = Options()
    chrome_options.add_argument("--headless")  # 无界面模式
    # chrome_options.add_argument("--disable-gpu")  # 禁用GPU加速
    # chrome_options.add_argument("--window-size=1920x1080")  # 设置浏览器窗口大小

    # driver = webdriver.Chrome(options=chrome_options)
    # 这里使用环境变量获取电影名称
    Movie_name = os.getenv('MOVIE_NAME')

    # 创建 WebDriver 对象，指明使用chrome浏览器驱动
    wd = webdriver.Chrome(service=Service(r'T:\python\Lib\site-packages\selenium\chromedriver-win64\chromedriver.exe'))

    # 调用WebDriver 对象的get方法 可以让浏览器打开指定网址
    wd.get('https://movie.douban.com/')
    # 以下路径匹配
    wd.find_element(By.XPATH, '//*[@id="inp-query"]').send_keys(Movie_name)
    wd.find_element(By.XPATH, '//*[@id="db-nav-movie"]/div[1]/div/div[2]/form/fieldset/div[2]/input').click()
    # 点击
    # time.sleep(1)
    wd.find_element(By.XPATH, '//*[@id="root"]/div/div[2]/div[1]/div[1]/div[1]/div/div/div[1]/a').click()
    # time.sleep(1)
    wd.find_element(By.XPATH, '//*[@id="reviews-wrapper"]/p/a').click()
    # 获取网页源码
    # 建立列表
    good_list = []
    j = 5  # 设置星际
    list = []
    for i in range(1):  # 爬取指定多少页数
        baseurl = wd.current_url + f"?rating={j}&start=" + str(i * 20)
        print(baseurl + "开始")  # 打印当前所爬取的页数
        response = requests.request("GET", baseurl, headers=headers, data=payload)
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')  # 给bs处理

        alist = soup.findAll("h2")
        for i in alist:
            list.append(i.a.get("href"))
    wd.quit()
    # 建立列表存影评链接
    list1 = []
    # 遍历列表，获取每个 URL 中的评论：
    for i in list:
        # 去除 URL 中的斜杠和末尾的斜杠
        url_cleaned = i.strip('/')
        # 将 URL 以 '/' 分割成列表
        url_parts = url_cleaned.split('/')
        # 获取列表中倒数第二个元素，并尝试将其转换为整数
        review_id = int(url_parts[-2])
        url = "https://movie.douban.com/j/review/" + str(review_id) + "/full"
        list1.append(url)
    return list1
    print(list1)


# 设置爬虫类
# Spider definition
class DoubanSpider(scrapy.Spider):
    """Crawl Douban review JSON endpoints and yield cleaned review text.

    The start URLs are discovered dynamically via :func:`get_comment_url`;
    each response is a JSON document whose ``html`` field holds the review
    body as raw HTML, which ``parse`` strips down to plain text.
    """

    name = 'douban'  # identifier used by `scrapy crawl douban`
    allowed_domains = ['movie.douban.com']  # domains the crawler may visit
    start_urls = ['https://movie.douban.com/']  # unused seed; start_requests overrides it

    def start_requests(self):
        """Seed the crawl with the dynamically discovered review JSON URLs."""
        # get_comment_url drives a browser to resolve the JS-generated links.
        for url in get_comment_url():
            # Wrap each URL in a Request and hand it to the scheduler.
            yield Request(url=url)

    def parse(self, response: scrapy.http.Response):
        """Parse one ``/j/review/<id>/full`` JSON response.

        Yields a MovieItem whose ``comment`` field is the review text with
        HTML tags removed and ``&nbsp;`` entity runs collapsed to a space.
        """
        movie_item = MovieItem()
        rs = json.loads(response.text)  # endpoint returns a JSON object
        # Guard: a missing 'html' key previously yielded None and crashed
        # re.sub with a TypeError; fall back to an empty string instead.
        contents = rs.get('html') or ''
        # Strip markup, then collapse &nbsp; entities into single spaces.
        clean_text = re.sub(r'<[^>]*>', '', contents)
        clean_text = re.sub(r'&nbsp;+', ' ', clean_text)
        movie_item['comment'] = clean_text
        yield movie_item
