import requests
from bs4 import BeautifulSoup
import logging.handlers
import random
import redis
import time
import json

'''
Scrape the comments for the Douban movie "The Island" (一出好戏) using requests.
'''
# Base comments endpoint; the relative "next" href from each page is appended to it.
url_prefix = 'https://movie.douban.com/subject/26985127/comments'
# First page: start at 0, 20 per page, sorted by new_score, status=P ("watched").
init_url = 'https://movie.douban.com/subject/26985127/comments?start=0&limit=20&sort=new_score&status=P'

# Decoding options must live on the ConnectionPool: when `connection_pool` is
# passed to Redis(), encoding kwargs given to Redis() are ignored.  The original
# also passed charset=True — `charset` is a deprecated *string* alias for
# `encoding` in redis-py and is invalid as a boolean.
pool = redis.ConnectionPool(host='127.0.0.1', port=6379,
                            decode_responses=True, encoding='utf-8')
r = redis.Redis(connection_pool=pool)
# Rotate User-Agent strings so repeated requests look less like a single bot.
user_agents = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; ) AppleWebKit/534.12 (KHTML, like Gecko) Maxthon/3.0 Safari/534.12'
]


def get_headers():
    """Build browser-like request headers with a randomly chosen User-Agent.

    :return: dict of HTTP headers for requests to movie.douban.com
    """
    return {
        'User-Agent': random.choice(user_agents),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'movie.douban.com',
        'Upgrade-Insecure-Requests': '1',
    }


def get_html(url, timeout=10):
    """Fetch *url* with randomized headers and return the parsed document.

    :param url: absolute URL to fetch
    :param timeout: seconds before the request is aborted — requests.get
        without a timeout can block forever on a stalled connection
    :return: BeautifulSoup of the response body (lxml parser)
    :raises requests.HTTPError: on a non-2xx response, instead of silently
        parsing an error page and failing later in the extractors
    """
    headers = get_headers()
    response = requests.get(url, headers=headers, timeout=timeout)
    response.raise_for_status()
    return BeautifulSoup(response.text, 'lxml')


def handle_comment(html):
    '''
    Extract all comment records from one comments page.

    :param html: BeautifulSoup of a Douban comments page
    :return: list[dict] with keys votes, user_href, user_name,
             title, allstar, time, short
    '''
    comment_list = []
    for item in html.find_all(name='div', class_='comment-item'):
        comment = {}
        # Number of "useful" votes.
        votes = item.find(name='span', class_='comment-vote').find(name='span', class_='votes')
        comment['votes'] = votes.get_text()
        # Reviewer's profile link and display name.
        comment_info = item.find(name='span', class_='comment-info')
        user_info = comment_info.find(name='a')
        comment['user_href'] = user_info['href']
        comment['user_name'] = user_info.get_text()
        # Star rating and its Chinese label.  Some comments carry no rating
        # span at all, so test for None explicitly instead of catching the
        # broad Exception (and printing debug noise) as the original did.
        comment['title'] = None
        comment['allstar'] = None
        rating = comment_info.find(name='span', class_='rating')
        if rating is not None:
            comment['title'] = rating['title']
            # class looks like 'allstar50'; keep only the numeric part.
            comment['allstar'] = rating['class'][0].replace('allstar', '')
        # Comment timestamp, with layout whitespace stripped.
        comment_time = comment_info.find(name='span', class_='comment-time') \
            .get_text().replace(' ', '').replace('\n', '')
        comment['time'] = comment_time
        # Short review text.
        short = item.find(name='span', class_='short')
        comment['short'] = short.get_text()
        # A fresh dict is built each iteration, so no defensive copy is needed.
        comment_list.append(comment)
    return comment_list


def handle_user(url):
    """Scrape basic profile info from a Douban user page.

    :param url: URL of the user's profile page
    :return: dict with keys 'site', 'join', 'name'
    """
    page = get_html(url)
    info_div = page.find(name='div', class_='user-info')
    # The 'pl' line holds "<name>  <join-date>加入" separated by two spaces.
    parts = info_div.find(name='div', class_='pl').get_text().split('  ')
    return {
        'site': info_div.find(name='a').get_text(),
        'join': parts[1].replace('加入', ''),
        'name': parts[0],
    }


def plan(comments, next_url):
    """Persist scraped comments and record the processed page URL in Redis.

    :param comments: list[dict] produced by handle_comment
    :param next_url: URL of the page the comments were scraped from
    """
    for comment in comments:
        # Hash 'ychx' maps user profile URL -> JSON-serialized comment.
        r.hset('ychx', comment['user_href'], json.dumps(comment))
    # Record the page once.  The original issued this SADD inside the loop,
    # sending one redundant (idempotent) command per comment.
    r.sadd('plans', next_url)


def main():
    """Crawl comment pages from init_url, following 'next' links until none remain."""
    page_url = init_url
    while True:
        print(page_url)
        soup = get_html(page_url)
        plan(handle_comment(soup), page_url)
        next_link = soup.find(name='a', class_='next')
        if next_link is None:
            # Last page: Douban omits the 'next' anchor.
            break
        # The 'next' href is relative (e.g. '?start=20&...'); prepend the base.
        page_url = url_prefix + next_link['href']
        time.sleep(3)  # throttle between pages to avoid being rate-limited


main()
