import requests
from lxml import etree
import pandas as pd
import time


# Scrape the comments from one page.
def get_info(url):
    """Yield comment records scraped from one Douban short-comments page.

    Parameters
    ----------
    url : str
        URL of a Douban short-comments page.

    Yields
    ------
    dict
        One record per comment with keys '评论' (comment text),
        '有用数' (useful-vote count) and '评级' (star-rating title).

    Notes
    -----
    Relies on the module-level ``headers`` dict for the HTTP request.
    When the page contains no comments (the anonymous page limit was
    reached), a warning is printed and the generator simply stops.
    The former ``while 1`` wrapper never looped more than once, and its
    ``return ''`` could not be observed by callers (a generator
    function's ``return`` value is invisible to ``list()``/iteration),
    so both were removed as dead code.
    """
    res = requests.get(url, headers=headers)
    xml = etree.HTML(res.text)
    comments = xml.xpath('//span[@class="short"]/text()')              # comment texts
    votes = xml.xpath('//span[@class="votes"]/text()')                 # useful-vote counts
    stars = xml.xpath('//span[@class="comment-info"]/span[2]/@title')  # star-rating titles
    if not comments:
        # Anonymous page limit reached; further pages need login cookies.
        print('页数超限了，需要更多需登录获取再加上cookies')
        return
    for comment, vote, star in zip(comments, votes, stars):
        yield {'评论': comment, '有用数': vote, '评级': star}

# Build the page URLs directly from the pagination pattern,
# so there is no need to parse "next page" links.
def create_url(url0, pages=10):
    """Return the list of comment-page URLs for a Douban subject.

    Parameters
    ----------
    url0 : str
        URL of the first comments page, e.g.
        ``https://movie.douban.com/subject/<id>/comments?status=P``.
    pages : int, optional
        Number of pages to generate; 20 short comments per page, and
        anonymous users can fetch at most 10 pages — hence the default.

    Returns
    -------
    list[str]
        ``url0`` followed by ``pages - 1`` paginated URLs derived from it.

    Notes
    -----
    The base path is derived from ``url0`` instead of being hard-coded,
    so the function works for any subject id, not only ``25853071``.
    For the original caller the output is unchanged.
    """
    base = url0.split('?', 1)[0]  # strip the query string to get the base path
    return [url0] + [
        f'{base}?start={i * 20}&limit=20&sort=new_score&status=P'
        for i in range(1, pages)
    ]


if __name__ == '__main__':
    # First comments page of the movie "Joy of Life" (庆余年).
    url = 'https://movie.douban.com/subject/25853071/comments?status=P'
    # Desktop-browser User-Agent; Douban blocks the default requests UA.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'}
    url_ls = create_url(url)
    data = []
    print('爬虫开始，总爬取页数=%d' % len(url_ls))
    for count, page_url in enumerate(url_ls, start=1):
        # get_info is a generator function, so calling it always returns
        # a generator object — the old `isinstance(generator, str)` check
        # could never be true and was removed as dead code.
        dic_ls = list(get_info(page_url))
        if not dic_ls:
            # Empty page means the anonymous page limit was reached
            # (get_info already printed a warning): stop early instead
            # of uselessly requesting the remaining URLs.
            break
        data += dic_ls
        print('已爬完第{}页'.format(count))
        time.sleep(2)  # throttle requests to be polite to the server
    pd.DataFrame(data).to_excel('庆余年评.xlsx', index=False)