#!/usr/bin/env python3
# description: 获取知乎钓鱼贴下所有图片
# author: stablegenius
# date: 2021-02-01

import re
import os
from math import ceil
import asyncio
import aiohttp
from aiofile import async_open

class LSP:
    """
    Scrape every image posted under a Zhihu question thread.

    Workflow: page through all answers of the question via Zhihu's v4 API,
    harvest image URLs from each answer's HTML content, then download every
    image into an ``img`` folder next to this script.
    """
    def __init__(self, cookie: str):
        """
        Set up the HTTP session and the download folder.

        @param cookie: cookie string copied from the browser's developer
            tools; used to simulate a logged-in session.
        """
        self.limit = 20  # number of answers fetched per API request
        self.img_lst = set()  # collected image URLs; a set deduplicates them

        self.base_url = 'https://www.zhihu.com/api/v4/questions'
        self.answer_url = 'answers'

        # Query-string template.  _fetch_img_url takes a per-request copy
        # with its own 'offset' instead of mutating this shared dict, so
        # concurrent page fetches cannot clobber each other's offset.
        self.params = {
            'include': r'data[*].content',
            'limit': self.limit,
            'offset': 0,
            'platform': 'desktop',
            'sort_by': 'default',
            }
        headers = {
            'accept': '*/*',
            'cookie': cookie,  # user-supplied cookie for the simulated login
            'referer': 'https://www.zhihu.com',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.56',
            }

        # Connection pool capped at 10 concurrent connections.
        conn = aiohttp.TCPConnector(limit=10, force_close=True, enable_cleanup_closed=True, ssl=False)
        # Reusable session, analogous to requests.Session.
        self.session = aiohttp.ClientSession(headers=headers, connector=conn)

        # Folder (next to this script) that receives the downloaded images.
        self.folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'img')
        # NOTE(review): if self.folder already exists as a *file*, os.mkdir
        # raises FileExistsError; kept as-is — that is a loud, explicit failure.
        if not os.path.exists(self.folder) or os.path.isfile(self.folder):
            os.mkdir(self.folder)


    async def _fetch_img_url(self, question_id: str, index: int = 0) -> int:
        """
        Collect the image URLs from one page of answers.

        @param question_id: id of the question thread
        @param index: zero-based page index of the answers
        @return: total number of answers under the question
        """
        # Use a per-call copy of the query parameters.  Mutating the shared
        # self.params (as the original code did) races with the other
        # concurrent page fetches: a sibling task could overwrite 'offset'
        # before this request was actually serialized.
        params = dict(self.params, offset=index * self.limit)
        async with self.session.get(f'{self.base_url}/{question_id}/{self.answer_url}', params = params) as resp:
            answer = await resp.json()

        # Pull the image download links out of each answer's HTML body.
        if data := answer['data']:
            print(f'成功获取到帖子 {question_id} 第 {index+1} 页的所有回答...')
            for item in data:
                self.img_lst.update(re.findall(r'data-original=\"(.*?)\?', item['content']))

        # Return the total answer count so the caller can derive the page count.
        return answer['paging']['totals']

    async def _download_img(self, url: str) -> None:
        """
        Download a single image into self.folder (best effort).

        @param url: image download URL
        """
        img_name = url.split('/')[-1]
        async with self.session.get(url) as resp:
            async with async_open(os.path.join(self.folder, img_name), 'wb') as f:
                try:
                    await f.write(await resp.read())
                except Exception:
                    # Narrowed from a bare `except:`, which would also have
                    # swallowed task cancellation (CancelledError) and Ctrl-C.
                    print(f'下载图片 {img_name} 时出错！')
                else:
                    print(f'成功下载图片 {img_name} ！')

    async def go(self, question_id: str) -> None:
        """
        Entry point: fetch all image URLs of the question, then download them.

        @param question_id: id of the question thread
        """
        # The first request doubles as a probe for the total answer count.
        answer_total_count = await self._fetch_img_url(question_id)
        answer_page_count = ceil(answer_total_count / self.limit)

        # Fetch the remaining pages concurrently.  Page 0 was already consumed
        # by the probe above, so start at 1.  gather() (unlike asyncio.wait)
        # accepts an empty argument list and propagates exceptions.
        await asyncio.gather(*(self._fetch_img_url(question_id, page)
                               for page in range(1, answer_page_count)))

        print(f'我发现 {question_id} 帖子下面总共有 {len(self.img_lst)} 张图片！')

        # Download every collected image concurrently; the connector's
        # limit=10 caps the number of simultaneous connections.
        await asyncio.gather(*(self._download_img(url) for url in self.img_lst))

if __name__ == '__main__':
    async def main() -> None:
        """Build the scraper inside the running loop, run it, clean up."""
        # Constructing LSP *here* (rather than before a loop exists) binds the
        # aiohttp session to the running event loop, and the finally block
        # guarantees the session is closed instead of leaking its connections.
        app = LSP(r'Your Own Cookie')
        try:
            await app.go('390232900')
        finally:
            await app.session.close()

    # asyncio.run replaces the deprecated get_event_loop/run_until_complete pair.
    asyncio.run(main())
