"""
需要导入第三方模块
cmd控制台输入以下命令并执行：
pip install requests
pip install urllib3
pip install bs4
"""

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import random
from bs4 import BeautifulSoup
import re


def browse_url(url: str, retries: int = 3, timeout: int = 10) -> str:
    """Fetch ``url`` and return its decoded HTML text, or '' on failure.

    Sends the request with a randomly chosen desktop User-Agent and
    browser-like headers, retries transient failures (429/5xx) with
    exponential backoff, and fixes the text encoding when the server
    omits a charset (requests then defaults to ISO-8859-1, which garbles
    CJK pages).

    Args:
        url: Absolute http(s) URL to download.
        retries: Total retry attempts for transient errors.
        timeout: Per-request timeout in seconds.

    Returns:
        The response body as text, or an empty string on any error.
    """
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0',
        'Mozilla/5.0 (X11; Linux i686; rv:89.0) Gecko/20100101 Firefox/89.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 11.5; rv:90.0) Gecko/20100101 Firefox/90.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Safari/605.1.15',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59',
    ]
    headers = {
        'User-Agent': random.choice(USER_AGENTS),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Cache-Control': 'max-age=0',
    }
    retry_strategy = Retry(
        total=retries,
        backoff_factor=1,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET", "POST"],
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)

    # 'with' closes the session (and its pooled connections) on every
    # exit path -- the original leaked it.
    with requests.Session() as session:
        session.mount('https://', adapter)
        session.mount('http://', adapter)
        try:
            response = session.get(
                url,
                headers=headers,
                timeout=timeout,
                allow_redirects=True,
            )
            response.raise_for_status()
            # ISO-8859-1 is requests' fallback when no charset header is
            # sent; use content-sniffed encoding instead.
            if not response.encoding or response.encoding == 'ISO-8859-1':
                response.encoding = response.apparent_encoding
            return response.text
        except requests.exceptions.RequestException as e:
            print(f"请求异常: {e}")
            return ""
        except Exception as e:
            # Best-effort fallback: report and return empty rather than crash.
            print(f"未知错误: {e}")
            return ""


class HTMLToMarkdownConverter:
    """Convert an HTML fragment or document into Markdown text.

    Supported tags: p, h1-h6, ul/ol/li, a, img, strong/b, em/i, code,
    pre, blockquote, br, hr.  Any other tag is unwrapped: the tag itself
    is dropped and its children are converted in place.
    """

    def __init__(self):
        # Indent unit reserved for nested-list indentation.
        self.indent_char = '  '
        # Bullet styles cycled by nesting depth for unordered lists.
        self.list_markers = ['*', '-', '+']

    def convert(self, html: str) -> str:
        """Return the Markdown rendering of ``html``."""
        soup = BeautifulSoup(html, 'html.parser')
        # Prefer <body> when a full document was supplied.
        return self._process_element(soup.body or soup)

    def _process_element(self, element, indent_level=0) -> str:
        """Render a single node (tag or text string) to Markdown."""
        if element is None:
            return ''
        if isinstance(element, str):
            return self._escape_text(element)

        tag = element.name
        if tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
            return '#' * int(tag[1]) + ' ' + self._process_children(element) + '\n\n'
        if tag == 'p':
            return self._process_children(element) + '\n\n'
        if tag == 'ul':
            return self._process_list(element, indent_level, is_ordered=False)
        if tag == 'ol':
            return self._process_list(element, indent_level, is_ordered=True)
        if tag == 'li':
            # A stray <li> outside any list: render as an unordered item.
            marker = self._get_list_marker(indent_level)
            return f"{marker} {self._process_children(element)}\n"
        if tag == 'a':
            href = element.get('href', '')
            return f"[{self._process_children(element)}]({href})"
        if tag == 'img':
            return f"![{element.get('alt', '')}]({element.get('src', '')})"
        if tag in ('strong', 'b'):
            return f"**{self._process_children(element)}**"
        if tag in ('em', 'i'):
            return f"*{self._process_children(element)}*"
        if tag == 'code':
            content = self._process_children(element)
            # Code under <pre> is fenced by the pre branch; inline code
            # gets backticks (the original emitted it bare -- a bug).
            if element.parent is not None and element.parent.name == 'pre':
                return content
            return f"`{content}`"
        if tag == 'pre':
            return f"```\n{self._process_children(element)}\n```\n\n"
        if tag == 'blockquote':
            quoted = self._process_children(element).strip()
            return '\n'.join('> ' + line for line in quoted.split('\n')) + '\n\n'
        if tag == 'br':
            return '  \n'  # two trailing spaces = Markdown hard line break
        if tag == 'hr':
            return '---\n\n'
        # Unknown tag: drop the tag, keep its converted children.
        return self._process_children(element)

    def _process_children(self, element) -> str:
        """Concatenate the Markdown of every direct child of ``element``."""
        return ''.join(self._process_element(child) for child in element.children)

    def _process_list(self, list_element, indent_level, is_ordered) -> str:
        """Render the direct <li> children of a <ul>/<ol>.

        Renders each item's children directly with the correct marker.
        (The original first rendered every <li> with an unordered bullet
        and then regex-stripped the marker, which never matched for
        ordered lists -- producing '1. * item' -- and doubled spaces in
        unordered ones.)
        """
        lines = []
        for i, item in enumerate(list_element.find_all('li', recursive=False)):
            marker = f"{i + 1}." if is_ordered else self._get_list_marker(indent_level)
            lines.append(f"{marker} {self._process_children(item).strip()}")
        return '\n'.join(lines) + '\n\n' if lines else ''

    def _get_list_marker(self, indent_level, is_ordered=False):
        """Return '1.' for ordered items, else a bullet cycled by depth."""
        if is_ordered:
            return '1.'
        return self.list_markers[indent_level % len(self.list_markers)]

    def _escape_text(self, text: str) -> str:
        """Hook for Markdown escaping; currently returns text unchanged."""
        return text


def html_to_markdown(html_content: str) -> str:
    """Convenience wrapper: convert an HTML string straight to Markdown."""
    return HTMLToMarkdownConverter().convert(html_content)

if __name__ == "__main__":
    # Scrape one CSDN article and save its body as Markdown.
    url = "https://blog.csdn.net/u010189239/article/details/149022041"
    html_content = browse_url(url)
    soup = BeautifulSoup(html_content, 'html.parser')
    # CSDN places the article body inside div#content_views; keep only that.
    htmls = ''.join(
        str(element) for element in soup.find_all("div", id="content_views")
    )
    markdown = html_to_markdown(htmls)
    # 'with' guarantees the file is closed even if write() raises
    # (the original used open()/close() without a finally).
    with open("作业.md", mode="w", encoding="utf-8") as file:
        file.write(markdown)