import crawlertool as tool
from bs4 import BeautifulSoup
import time
import json
import random
# pip install fake_useragent
from fake_useragent import UserAgent

'''
Exercise: Douban film-review crawler (练习：豆瓣影评数据爬虫)

Scrapes the "best reviews" listing pages from movie.douban.com and dumps
the parsed review records as a JSON string.
'''
class SpiderDouBanFilmReview():
    """Scraper for Douban's "best film reviews" listing pages.

    Fetches listing pages from movie.douban.com, parses each review card
    with BeautifulSoup, and dumps the collected records as a JSON string.
    """

    def __init__(self):
        # Landing page of the "best reviews" listing; pagination is done
        # via the `?start=N` query parameter (20 reviews per page).
        self.douban_film_review_url = 'https://movie.douban.com/review/best/'
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'movie.douban.com',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            # Random Chrome user agent to reduce the chance of being blocked.
            'User-Agent': UserAgent().chrome
        }
        # Recommendation grades: Chinese label -> star count (1-5).
        # '很差' (1 star) was missing from the original table.
        # NOTE(review): this mapping is not used by main() yet; exposed for callers.
        self.recommend_dict = {'力荐': 5, '推荐': 4, '还行': 3, '较差': 2, '很差': 1}

    def main(self, page_num):
        """Crawl `page_num` listing pages and return the reviews as JSON.

        :param page_num: number of listing pages to fetch (20 reviews each).
        :return: JSON string (non-ASCII preserved) of the scraped review dicts.
        """
        print('正在爬取豆瓣影评，请稍后。。。')
        film_review_list = []
        for page_index in range(page_num):
            url = self.douban_film_review_url + '?start={0}'.format(page_index * 20)
            response = tool.do_request(url, headers=self.headers)
            # Decode leniently: the page may contain stray undecodable bytes.
            bs = BeautifulSoup(response.content.decode(errors="ignore"), 'lxml')
            # All review card tags on this page.
            film_review_labels = bs.select('#content > div > div > div > div')
            # The last matched node is not a review card and would fail to
            # parse, so it is excluded via the [:-1] slice.
            for film_review_label in film_review_labels[:-1]:
                # Film page URL.
                film_url = film_review_label.select_one('div > a')['href']
                # Film title.
                film_title = film_review_label.select_one('div > a > img')['title']
                # Film cover image URL.
                film_cover = film_review_label.select_one('div > a > img')['src']
                # Reviewer avatar image URL.
                reviewer_avator = film_review_label.select_one('div > header > .avator > img')['src']
                # Reviewer nickname.
                reviewer_name = film_review_label.select_one('div > header > .name').text
                # Recommendation grade; reviews without a rating have no
                # .main-title-rating node, so fall back to the placeholder.
                recommend_grade_obj = film_review_label.select_one('div > header > .main-title-rating')
                if recommend_grade_obj is not None:
                    recommend_grade = recommend_grade_obj['title']
                else:
                    recommend_grade = '暂未评分'
                # Review date.
                review_date = film_review_label.select_one('div > header > .main-meta').text
                # Review title.
                review_content_title = film_review_label.select_one('div > div > h2 > a').text
                # Review content (short excerpt).
                review_content_short = film_review_label.select_one('div > div > .review-short > .short-content').text
                film_review_list.append({
                    'film_title': film_title,
                    'film_url': film_url,
                    'film_cover': film_cover,
                    'reviewer_name': reviewer_name,
                    'reviewer_avator': reviewer_avator,
                    'recommend_grade': recommend_grade,
                    'review_date': review_date,
                    'review_content_title': review_content_title,
                    'review_content_short': review_content_short,
                })
            # Randomized pause between pages to avoid hammering the server.
            time.sleep(random.randint(3, 5))
        # ensure_ascii=False keeps Chinese characters readable in the output.
        result_json = json.dumps(film_review_list, ensure_ascii=False)
        print(result_json)
        # Return the JSON so callers can use it; printing is kept for
        # backward compatibility with script usage.
        return result_json

if __name__ == '__main__':
    # Script entry point: crawl the first 5 listing pages of best reviews.
    SpiderDouBanFilmReview().main(5)
