from gevent import monkey

monkey.patch_all()
import gevent, requests, bs4, time, csv
from gevent.queue import Queue

# Base URL of Douban Movies; reused for the front-page request below.
pre_url = "https://movie.douban.com/"
# Browser-like User-Agent so Douban does not reject the request outright.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:94.0) Gecko/20100101 Firefox/94.0'}
url_list = Queue()      # movie detail-page URLs, consumed by crawler_info()
comment_list = Queue()  # comment-page URLs, consumed by crawler_comment_url()

# Fetch the front page and collect up to 30 movie detail links from the
# carousel items (class 'ui-slide-item').
res = requests.get(pre_url, headers=headers)  # use pre_url instead of repeating the literal
bs = bs4.BeautifulSoup(res.text, 'html.parser')
movie_list = bs.find_all(class_='ui-slide-item')
for movie in movie_list:
    movie_link = movie.find('li', class_='poster').find('a')['href']
    url_list.put_nowait(movie_link)
    # '>=' is safer than '==' in case the queue ever starts non-empty.
    if len(url_list) >= 30:
        break

# Output CSVs; header rows written once up front. utf-8 keeps the Chinese
# column names and the scraped text intact.
csv_file_movie = open('movie.csv', 'w', newline='', encoding='utf-8')
csv_file_comment = open('comment.csv', 'w', newline='', encoding='utf-8')
writer_movie = csv.writer(csv_file_movie)
writer_movie.writerow(['电影名', '导演', '编剧', '主演', '评论链接'])
writer_comment = csv.writer(csv_file_comment)
writer_comment.writerow(['电影名', '评论'])


def crawler_info():
    """Consume movie detail URLs from ``url_list``.

    For each movie: scrape its name, director, writer and cast into
    movie.csv, and enqueue its comment-page URLs onto ``comment_list``
    for crawler_comment_url() to process.
    """
    while True:
        # get_nowait guarded by Empty avoids the race where another greenlet
        # drains the queue between an empty() check and the get.
        try:
            url = url_list.get_nowait()
        except gevent.queue.Empty:
            break
        print(url)
        res = requests.get(url, headers=headers)
        bs = bs4.BeautifulSoup(res.text, 'html.parser')
        movie_name = bs.find(id="content").find('h1').text.strip()
        comment_link = bs.find(id='comments-section').find(class_='pl').find('a')['href']
        # The bare link IS page start=0, so the extra pages begin at i=1;
        # the original enqueued the start=0 page twice.
        comment_list.put_nowait(comment_link)
        for i in range(1, 5):
            comment_list.put_nowait(comment_link + "&start=" + str(i * 20) + "&limit=20")
        # The 'attrs' spans under #info are director / writer / cast; some
        # movies omit the writer (2 spans) or director and writer (1 span).
        movie_info_list = bs.find(id='info').find_all(class_='attrs')
        if len(movie_info_list) == 3:
            movie_dire = movie_info_list[0].text.strip()
            movie_bian = movie_info_list[1].text.strip()
            movie_actor = movie_info_list[2].text.strip()
        elif len(movie_info_list) == 2:
            movie_dire = movie_info_list[0].text.strip()
            movie_bian = '暂无'
            movie_actor = movie_info_list[1].text.strip()
        elif len(movie_info_list) == 1:
            movie_dire = '暂无'
            movie_bian = '暂无'
            movie_actor = movie_info_list[0].text.strip()
        else:
            # No attrs at all — the original crashed on [0] here; record
            # placeholders instead of killing the greenlet.
            movie_dire = movie_bian = movie_actor = '暂无'
        writer_movie.writerow([movie_name, movie_dire, movie_bian, movie_actor, comment_link])
        time.sleep(1)  # throttle requests; gevent yields to other greenlets here


def crawler_comment_url():
    """Consume comment-page URLs from ``comment_list`` and append every
    short comment on each page to comment.csv as (movie name, comment).
    """
    while True:
        # get_nowait guarded by Empty avoids the empty()/get race between
        # concurrent greenlets.
        try:
            url = comment_list.get_nowait()
        except gevent.queue.Empty:
            break
        print(url)
        res = requests.get(url, headers=headers)
        bs = bs4.BeautifulSoup(res.text, 'html.parser')
        name = bs.find(id='content').find('h1').text.strip()
        comments = bs.find_all(class_='comment-item')
        for comment in comments:
            # Some comment items (ads, deleted entries) lack the nested
            # structure; skip them instead of letting an AttributeError
            # kill the whole greenlet.
            node = comment.find(class_='comment')
            if node is not None:
                node = node.find(class_='comment-content')
            if node is not None:
                node = node.find(class_='short')
            if node is None:
                continue
            writer_comment.writerow([name, node.text.strip()])
        time.sleep(0.1)  # small per-page delay; gevent yields here


# Pass the FUNCTION OBJECT to gevent.spawn. The original wrote
# gevent.spawn(crawler_info()) — that CALLS the function immediately in the
# main greenlet (running the whole crawl sequentially) and then spawns a
# greenlet around its None return value, i.e. no concurrency at all.
info_tasks = [gevent.spawn(crawler_info) for _ in range(2)]
# The comment workers stop as soon as comment_list is empty, so they may
# only start once the info workers have finished filling that queue.
gevent.joinall(info_tasks)
comment_tasks = [gevent.spawn(crawler_comment_url) for _ in range(5)]
gevent.joinall(comment_tasks)
# Flush and release the CSV outputs now that all workers are done.
csv_file_movie.close()
csv_file_comment.close()
