import requests  # 网络请求
from bs4 import BeautifulSoup  # 网页解析
from markdownify import MarkdownConverter  # html 转 markdown
from tqdm import tqdm  # 进度条
import warnings
import sys
import json
import os
import re
from typing import Literal

split_re = re.compile(r'[<>/\\|:*?"]')


def _get_valid_filename(raw_str: str):
    return '_'.join(split_re.split(raw_str))


class ZhiHuHandler(MarkdownConverter):
    """Zhihu exporter: converts Zhihu articles/answers to Markdown files.

    Extends ``markdownify.MarkdownConverter`` with Zhihu-specific handling
    for TeX formulas, reference footnotes, figures, embedded videos and
    standalone zvideos, optionally downloading media to a local directory.
    """

    def __init__(self,
                 ROOT_PATH: str,
                 MEDIA_PATH: str = None,
                 cookies: dict = None,
                 zvideo_resolution: Literal['LD', 'SD', 'HD', 'FHD'] = 'HD',
                 video_resolution: Literal['LD', 'SD', 'HD'] = 'HD',
                 disable_cache: bool = False,
                 parser: Literal['lxml', 'html.parser'] = "lxml",
                 headers: dict = None,
                 **options):
        """
        :param ROOT_PATH: directory receiving the exported files (created if absent)
        :param MEDIA_PATH: optional directory for downloaded media; when None,
            images/videos keep their remote URLs in the generated markdown
        :param cookies: Zhihu cookies, needed to export paid content
        :param zvideo_resolution: resolution for standalone zvideo downloads
        :param video_resolution: resolution for videos embedded in articles/answers
        :param disable_cache: when True, always re-download media even if cached
        :param parser: BeautifulSoup parser backend
        :param headers: HTTP request headers; defaults to a desktop browser UA
        :param options: extra keyword options forwarded to MarkdownConverter
        :raises ValueError: on invalid paths or unknown resolution values
        """
        super().__init__(
            escape_asterisks=False,
            escape_underscores=False,
            heading_style='ATX',
            # Zhihu code blocks carry class "language-<lang>"; strip the
            # 9-character "language-" prefix to get the language name.
            code_language_callback=lambda el: el.code["class"][0][9:],
            **options)
        # Validate arguments.
        if ROOT_PATH is None:
            raise ValueError('ROOT_PATH 不能为 NoneType')
        if not os.path.exists(ROOT_PATH):
            os.makedirs(ROOT_PATH)  # fixed: makedirs so nested paths work
        elif not os.path.isdir(ROOT_PATH):
            raise ValueError('ROOT_PATH 必须是一个文件夹')
        if MEDIA_PATH:
            # Paths written into the markdown must be relative to ROOT_PATH.
            self.REL_MEDIA_PATH = os.path.relpath(MEDIA_PATH, ROOT_PATH)
            if not os.path.exists(MEDIA_PATH):
                os.makedirs(MEDIA_PATH)  # fixed: makedirs so nested paths work
            elif not os.path.isdir(MEDIA_PATH):
                raise ValueError('MEDIA_PATH 必须是一个文件夹')
        if video_resolution.upper() not in ['LD', 'SD', 'HD']:
            raise ValueError('video_resolution 必须为 LD/SD/HD')
        if zvideo_resolution.upper() not in ['LD', 'SD', 'HD', 'FHD']:
            # Fixed: this message previously named video_resolution by mistake.
            raise ValueError('zvideo_resolution 必须为 LD/SD/HD/FHD')
        self.ROOT_PATH = ROOT_PATH
        self.MEDIA_PATH = MEDIA_PATH
        self.cookies = cookies
        self.type = None        # content type of the current item ('article'/'answer'/'zvideo')
        self.id = None          # id of the item currently being exported
        self.file_name = None   # output path of the current export
        self.references = None  # footnote number -> "text <url>" mapping
        self.img_cnt = 0        # per-document image counter (used in media file names)
        self.video_cnt = 0      # per-document video counter (used in media file names)
        self.video_resolution = video_resolution.upper()
        self.zvideo_resolution = zvideo_resolution.upper()
        self.disable_cache = disable_cache
        self.parser = parser
        if headers:
            self.headers = headers
        else:
            self.headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0"
            }

    def convert(self, html):
        """Convert a Zhihu HTML fragment to markdown, appending any footnote
        references that were collected during conversion."""
        self.references = dict()
        soup = BeautifulSoup(html, self.parser)
        text = self.convert_soup(soup)
        if self.references:
            text += '**参考**\n\n'
            references_text = ''
            for key in self.references.keys():
                references_text += f'[^{key}]: {self.references[key]}  \n'
            text += references_text
        return text

    def convert_sup(self, el, text, convert_as_inline):
        """Turn Zhihu reference superscripts into markdown footnote markers."""
        if el.attrs.get('data-draft-type') == 'reference':  # special-case citations
            data_text = el.attrs.get('data-text', '')
            data_url = el.attrs.get('data-url', '')
            data_numero = el.attrs.get('data-numero', '')
            if data_numero not in self.references:
                self.references[data_numero] = fr'{data_text} <{data_url}>'
            return fr"[^{data_numero}]"
        return super().convert_sup(el, text, convert_as_inline)

    def convert_code(self, el, text, convert_as_inline):
        """Strip the trailing blank line Zhihu sometimes appends to code blocks."""
        if text and text[-1] == '\n':
            text = text[:-1]
        return super().convert_code(el, text, convert_as_inline)

    def convert_img(self, el, text, convert_as_inline):
        """Render Zhihu formula images (eeimg="1") as TeX; defer other images."""
        if (eeimg := el.attrs.get('eeimg', None)) and (eeimg == '1'):
            # Fixed: default to '' (not None) so len(tex) below cannot crash
            # when the alt attribute is missing.
            tex = el.attrs.get('alt', '')
            if len(tex) >= 2 and tex[-2:] == r'\\':
                # A trailing '\\' marks a display formula; the final newline
                # prevents two consecutive display formulas from merging.
                return '$$\n' + tex[:-2] + '\n$$\n'
            else:
                return '$' + tex + '$'
        return super().convert_img(el, text, convert_as_inline)

    def convert_figure(self, el, text, convert_as_inline):
        """Convert an article/answer figure to a markdown image, optionally
        downloading it to MEDIA_PATH."""
        # The real <img> may live directly under <figure>, under <noscript>,
        # or under <noscript><div>, depending on how Zhihu rendered the page.
        img_el = None
        if not el.noscript:
            img_el = el.img
        elif not el.noscript.img:
            if not el.noscript.div:
                img_el = None
            else:
                img_el = el.noscript.div.img
        else:
            img_el = el.noscript.img
        if not img_el:
            warnings.warn("非预期的 figure 标签")
            return f"<非预期的 figure 标签>"
        caption = el.figcaption.text if el.figcaption else ''
        self.img_cnt += 1
        src = self._load_media(img_el.attrs['src'], f'img{self.img_cnt}') if self.MEDIA_PATH else img_el.attrs['src']
        return f"![{caption}]({src})\n\n"

    def convert_a(self, el, text, convert_as_inline):
        """Convert links; embedded video boxes become thumbnail + video links."""
        # Fixed: use .get with a default — a plain <a> may carry no class
        # attribute, and el.attrs['class'] raised KeyError in that case.
        if 'video-box' in el.attrs.get('class', []):
            video_url = None
            request_url = None
            title = None
            try:
                request_url = f'https://lens.zhihu.com/api/v4/videos/{el.attrs["data-lens-id"]}'
                json_data = json.loads(requests.get(request_url).text)
                video_url = json_data['playlist'][self.video_resolution]['play_url']
                title = json_data["title"]
                self.img_cnt += 1
                img_src = self._load_media(el.img["src"], f'img{self.img_cnt}') if self.MEDIA_PATH else el.img["src"]
                self.video_cnt += 1
                video_src = self._load_media(video_url, f'video{self.video_cnt}-{self.video_resolution}', 'mp4') if self.MEDIA_PATH else video_url
                return f'[![]({img_src}){title}]({video_src})\n'
            except Exception:
                # If url/title were never extracted, the JSON shape was wrong;
                # otherwise the failure happened later in the pipeline.
                if not video_url or not title:
                    warnings.warn('Unexpected json_data for video-box.')
                else:
                    warnings.warn(f'出错了，请检查您能否通过浏览器正常访问 {request_url}')
                return ''
        converted_text = super().convert_a(el, text, convert_as_inline)
        # Fixed: .get — most links lack data-draft-type; indexing raised KeyError.
        if 'link-card' == el.attrs.get('data-draft-type'):
            converted_text += '\n\n'  # keep link cards on their own paragraph
        return converted_text

    def _load_media(self, src: str, filename: str, suffix: str = None):
        """Download *src* under MEDIA_PATH/<type><id>/ and return the path
        relative to ROOT_PATH; on failure the original URL is returned so the
        generated markdown still works."""
        try:
            if not suffix:
                # Derive the extension from the URL, dropping any query string.
                suffix = src.split('/')[-1].split('.')[-1].split('?')[0]
            filename += f'.{suffix}'
            file_path = os.path.join(self.MEDIA_PATH, f'{self.type}{self.id}')
            if not os.path.exists(file_path):
                os.makedirs(file_path)
            file_path = os.path.join(file_path, filename)
            if not self.disable_cache and os.path.exists(file_path):
                # Fixed: report the actual file name instead of "(unknown)".
                print(f'{self.type}{self.id}/{filename} already exists.')
                return os.path.join(self.REL_MEDIA_PATH, f'{self.type}{self.id}', filename)
            response = requests.get(src, stream=True)
            total = int(response.headers.get('content-length', 0))
            chunk_size = 1024
            with open(file_path, 'wb') as f, tqdm(
                desc=f'{self.type}{self.id}/{filename}',  # fixed: was "(unknown)"
                total=total,
                unit='iB',
                unit_scale=True,
                unit_divisor=chunk_size,
            ) as bar:
                for data in response.iter_content(chunk_size=chunk_size):
                    size = f.write(data)
                    bar.update(size)
            return os.path.join(self.REL_MEDIA_PATH, f'{self.type}{self.id}', filename)
        except Exception:
            warnings.warn(f'下载图像 {src} 失败')
            return src

    @staticmethod
    def _download_mp4(url: str, file_path: str, suffix: str = 'mp4'):
        """Download *url* to *file_path* with a progress bar, creating the
        directory if needed and appending *suffix* (or the URL's own extension)
        when *file_path* has none; re-raises on failure after warning."""
        dir_path, file_name = os.path.split(file_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        if '.' not in file_name:
            if not suffix and '.' in url:
                suffix = url.split('/')[-1].split('.')[-1].split('?')[0]
            if suffix:
                file_name += '.' + suffix
        file_path = os.path.join(dir_path, file_name)
        try:
            response = requests.get(url, stream=True)
            total = int(response.headers.get('content-length', 0))
            chunk_size = 1024
            with open(file_path, 'wb') as f, tqdm(
                desc=file_name,
                total=total,
                unit='iB',
                unit_scale=True,
                unit_divisor=chunk_size,
            ) as bar:
                for data in response.iter_content(chunk_size=chunk_size):
                    size = f.write(data)
                    bar.update(size)
        except Exception:
            warnings.warn(f'下载视频 {url} 失败')
            raise

    def get_html(self, url: str):
        """Fetch *url* with the configured cookies/headers and return the body
        decoded as UTF-8; exits the process on failure."""
        result = None
        try:
            result = requests.get(url, cookies=self.cookies, headers=self.headers)
        except requests.exceptions.ConnectionError:
            # Fixed: network failures raise ConnectionError, never ValueError,
            # so the "check your network" branch was previously unreachable.
            sys.stderr.write('出错了，请检查您的计算机网络是否正常！')
            sys.exit(1)  # fixed: non-zero exit code on failure (was exit(0))
        except Exception:
            sys.stderr.write(f'出错了，请检查您能否通过浏览器正常访问 {url}')
            sys.exit(1)  # fixed: non-zero exit code on failure (was exit(0))
        ## for debug
        # with open('res.html', 'wb') as f:
        #     f.write(result.content)
        return result.content.decode('utf-8')

    def _get_js_initialData(self, url):
        """Return the parsed JSON blob embedded in the page's
        #js-initialData script tag."""
        return json.loads(BeautifulSoup(self.get_html(url), self.parser).select("#js-initialData")[0].text)

    def export_markdown_from(self, url: str):
        """Export the Zhihu article or answer at *url* as a markdown file
        under ROOT_PATH; answers whose attachment is a video are delegated
        to :meth:`export_zvideo_from`."""
        try:
            entities = self._get_js_initialData(url)['initialState']['entities']
            # Renamed loop variable from `type` (shadowed the builtin).
            for content_type in ['article', 'answer']:
                types = content_type + 's'
                if not entities[types]:
                    continue
                self.type = content_type
                for item in entities[types].values():
                    if content_type == 'answer' and item.get('attachment') and item['attachment']['type'] == 'video':
                        self.export_zvideo_from('https://www.zhihu.com/zvideo/' + item['attachment']['video']['zvideoId'])
                        return
                    self.id = item["id"]
                    title = item["title"] if content_type == 'article' else item["question"]["title"]
                    title = _get_valid_filename(title)
                    self.file_name = os.path.join(self.ROOT_PATH, title + f"--{content_type}{self.id}.md")
                    content = item["content"]
                    if "paidInfo" in item:
                        if item["paidInfo"]["hasPurchased"]:
                            # Purchased paid content carries the full body here.
                            content = item["paidInfo"]["content"]
                        else:
                            warnings.warn(f'该内容({url})为付费内容，导出完整内容请在知乎付费后提供 cookies')
                    self.img_cnt = self.video_cnt = 0  # restart media numbering per document
                    with open(self.file_name, 'w', encoding='utf-8') as f:
                        f.write(f"**原文链接:** <{url}>\n**转载请注明出处！**\n\n" + self.convert(content))
                    print(f'{types}/{self.id} "{title}" 已成功导出至: "{self.file_name}"')
                    return
            raise Exception
        except Exception:
            sys.stderr.write(f'您的链接不正确或者 cookie 已到期，请检查链接或使用 --update_cookie 选项更新 cookie\n')
            raise

    def export_zvideo_from(self, url: str):
        """Export the standalone Zhihu zvideo at *url* as an mp4 file
        under ROOT_PATH at the configured zvideo resolution."""
        try:
            zvideos = self._get_js_initialData(url)['initialState']['entities']["zvideos"]
            for item in zvideos.values():
                self.type = item["type"]
                types = self.type + 's'
                self.id = item["id"]
                title = item["title"]
                title = _get_valid_filename(title)
                play_url = item["video"]["playlist"][self.zvideo_resolution.lower()]["playUrl"]
                self.file_name = os.path.join(self.ROOT_PATH, f'{title}--{self.type}{self.id}-{self.zvideo_resolution}.mp4')
                self._download_mp4(play_url, self.file_name)
                print(f'{types}/{self.id} "{title}" 已成功导出至: "{self.file_name}"')
                return
            raise Exception
        except Exception:
            warnings.warn(f"导出失败，url: {url}")
            raise