import requests
import json
import time
import random
from fake_useragent import UserAgent
import re

class DoubanSpider:
    """Interactive scraper for Douban's top-rated movie lists.

    Flow: scrape the category index page, let the user pick a category,
    then page through that category's JSON top-list endpoint 20 films at
    a time, printing each film's rank/title/score.
    """

    def __init__(self):
        # JSON endpoint template: first {} is the category id,
        # second {} is the paging offset (20 results per page).
        self.url = 'https://movie.douban.com/j/chart/top_list?type={}&interval_id=100%3A90&action=&start={}&limit=20'

    def get_html(self, url):
        """Fetch *url* with a randomized User-Agent and return the body text."""
        headers = {'User-Agent': UserAgent(path='fake_useragent.json').random}
        # A timeout keeps one stalled request from hanging the whole crawl.
        response = requests.get(url=url, headers=headers, timeout=10)
        return response.text

    @staticmethod
    def _extract_items(film_list):
        """Pure helper: reduce raw film dicts to {rank, title, score} records."""
        return [{'rank': film['rank'],
                 'title': film['title'],
                 'score': film['score']}
                for film in film_list]

    def parse_html(self, url):
        """Fetch one page of the top list, print each film, return the items.

        Returning the parsed records makes the method reusable beyond
        printing; callers that ignored the old ``None`` return are unaffected.
        """
        # The endpoint returns a JSON array of film objects: [{}, {}, ... {}]
        film_list = json.loads(self.get_html(url))
        items = self._extract_items(film_list)
        for item in items:
            print(item)
        return items

    def get_total(self, id):
        """Return the total number of films in category *id*.

        NOTE(review): the parameter name shadows the ``id`` builtin; it is
        kept unchanged for backward compatibility with keyword callers.
        """
        total_url = 'https://movie.douban.com/j/chart/top_list_count?type={}&interval_id=100%3A90'.format(id)
        return json.loads(self.get_html(total_url))['total']

    @staticmethod
    def _parse_type_dict(index_html):
        """Pure helper: map category name -> id from the chart page HTML."""
        # Raw string so regex metacharacters reach the engine verbatim.
        regex = r'<span><a href=.*?type_name=(.*?)&type=(.*?)&interval_id=100:90&action=">'
        # findall yields [('喜剧', '24'), ('剧情', '11'), ...]
        return dict(re.findall(regex, index_html, re.S))

    def get_all_type_dict(self):
        """Return {category_name: category_id} scraped from the chart index."""
        index_html = self.get_html('https://movie.douban.com/chart')
        return self._parse_type_dict(index_html)

    def crawl(self):
        """Interactive entry point: pick a category, then crawl every page."""
        # e.g. {'剧情': '11', '喜剧': '24', '爱情': '13', ...}
        all_type_dict = self.get_all_type_dict()
        print(''.join(name + '|' for name in all_type_dict))
        choice = input('请选择电影类别:')
        # Validate instead of raising a bare KeyError on a typo.
        if choice not in all_type_dict:
            print('未知类别: {}'.format(choice))
            return
        type_id = all_type_dict[choice]

        total = self.get_total(type_id)
        for start in range(0, total, 20):
            page_url = self.url.format(type_id, start)
            self.parse_html(url=page_url)
            # Randomized 0-1s pause to throttle the request rate.
            time.sleep(random.randint(0, 1))

if __name__ == '__main__':
    # Script entry point: build a spider and start the interactive crawl.
    DoubanSpider().crawl()



















