# -*- coding: utf-8 -*-

"""
DateTime   : 2021/04/12 11:28
Author     : ZhangYafei
Description: Douban movie crawler (豆瓣电影爬取)
https://movie.douban.com/j/search_subjects
"""
import csv
import os
import threading
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Iterable
from urllib.parse import quote

from prettytable import PrettyTable
from requests import Session
from zyf.timer import timeit


class DoubanCralwer:
    """Crawler for Douban movie recommendations.

    Queries the JSON endpoint ``https://movie.douban.com/j/search_subjects``
    for one or all movie categories and appends the results (type, title,
    rating, url, cover image) to a CSV file under ``data/``.
    """

    def __init__(self):
        self.url = 'https://movie.douban.com/j/search_subjects'
        # Category tags accepted by the Douban search API.
        self.movie_type_list = ['热门', '最新', '经典', '可播放', '豆瓣高分', '冷门佳片', '华语', '欧美', '韩国', '日本', '动作', '喜剧', '爱情', '科幻',
                                '悬疑', '恐怖',
                                '文艺']
        self.dir_path = 'data'
        # exist_ok avoids a race between the exists() check and mkdir().
        os.makedirs(self.dir_path, exist_ok=True)
        self.filepath = f'{self.dir_path}/豆瓣电影.csv'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
        }
        self.session = Session()
        self.session.headers = headers
        # Serializes CSV writes: spider() may run in many threads at once
        # and csv.writer is not thread-safe.
        self.write_lock = threading.Lock()
        self.init_file()
        self.init_tips_info()

    def init_file(self):
        """Open the output CSV for writing and emit the header row."""
        self.columns = ['类型', '电影名', '评分', 'url', '图片地址']
        # utf_8_sig adds a BOM so Excel opens the file with correct encoding.
        self.file = open(self.filepath, encoding='utf_8_sig', mode='w', newline='')
        self.csv_file = csv.writer(self.file)
        self.csv_file.writerow(self.columns)

    def init_tips_info(self):
        """Build a PrettyTable menu showing two (index, category) pairs per row."""
        self.tips_info_table = PrettyTable(['序号1', '电影分类1', '序号2', '电影分类2'])
        row = []
        for index, movie_type in enumerate(self.movie_type_list, start=1):
            row.extend([index, movie_type])
            if len(row) == 4:
                self.tips_info_table.add_row(row)
                row.clear()
            elif index == len(self.movie_type_list):
                # Odd number of categories: pad the final half-filled row.
                row.extend(['-', '-'])
                self.tips_info_table.add_row(row)

    @timeit
    def spider(self, name, limit):
        """Fetch up to *limit* recommended movies for category *name* and save them.

        Args:
            name: movie category tag, one of ``self.movie_type_list``.
            limit: number of entries to request (int or decimal string).
        """
        tag = quote(name)
        url = f'https://movie.douban.com/j/search_subjects?type=movie&tag={tag}&sort=recommend&page_limit={limit}&page_start=0'

        response = self.session.get(url)
        # The endpoint returns JSON shaped like
        # {"subjects": [{"title": ..., "rate": ..., "url": ..., "cover": ...}, ...]}
        subjects = response.json()['subjects']
        rows = [[name, item['title'], item['rate'], item['url'], item['cover']]
                for item in subjects]
        self.save_to_csv(rows)

    def save_to_csv(self, rows: Iterable[Iterable[Any]]):
        """Append *rows* to the CSV, serialized across the worker threads."""
        with self.write_lock:
            self.csv_file.writerows(rows)

    def run(self):
        """Interactive entry point: pick one category (or all) and crawl it.

        Raises:
            Exception: on an out-of-range category number or a non-integer limit.
        """
        print(self.tips_info_table)
        movie_type_id = input('请选择电影类型(默认/回车为全部) >> ')
        if not movie_type_id:
            movie_type = None  # empty input -> crawl every category
        elif movie_type_id.isdecimal() and 1 <= int(movie_type_id) <= len(self.movie_type_list):
            # Lower bound added: previously "0" slipped through and selected
            # movie_type_list[-1] via negative indexing.
            movie_type = self.movie_type_list[int(movie_type_id) - 1]
        else:
            raise Exception('您选择的电影类型不正确')
        limit = input('请输入下载个数 >> ')
        if not limit.isdecimal():
            raise Exception('输入的数字必须为整数')
        try:
            if movie_type:
                self.spider(movie_type, limit)
            else:
                with ThreadPoolExecutor(max_workers=len(self.movie_type_list)) as pool:
                    futures_list = [pool.submit(self.spider, t, limit) for t in self.movie_type_list]
                    for future in futures.as_completed(futures_list):
                        if future.exception():
                            print(future.exception())
        finally:
            # Flush and release the CSV handle opened in init_file().
            self.file.close()


if __name__ == '__main__':
    # Script entry point: build the crawler and start the interactive run.
    crawler = DoubanCralwer()
    crawler.run()
