# coding=utf-8

import codecs
import logging
import os
import re
import time

import html2text
import requests
from bs4 import BeautifulSoup

from src.com.itheima.web.util.html_utll import HtmlClient
from src.com.itheima.web.util.selenium_util import SeleniumUtil


class UrlLink:
    """Lightweight value object pairing an article URL with its display name."""

    def __init__(self, url, name):
        # Store both fields verbatim; no validation is performed here.
        self.url, self.name = url, name


class BlogService:
    """Downloads CSDN blog articles and converts them into Hexo-style Markdown files."""

    def __init__(self, save_dir=None):
        self.html_client = HtmlClient()
        # Root directory under which converted .md files are written.
        self.save_dir = save_dir

    def get_blog_content(self, url):
        """Return the HTML source of ``url``.

        Tries a plain HTTP GET first; on any request failure falls back to a
        Selenium-driven browser so JS-rendered / anti-bot pages still load.
        """
        headers = {
            # BUG FIX: the header value previously began with a duplicated
            # "User-Agent: " prefix; send only the UA string itself.
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'
        }
        try:
            # BUG FIX: timeout was 30_0000 (300,000 s ~ 3.5 days); 30 s is sane.
            resp = requests.get(url, headers=headers, timeout=30)
            resp.encoding = 'utf-8'
            return resp.text
        except requests.RequestException:
            logging.warning("plain GET failed for %s, falling back to Selenium", url)
            browser = SeleniumUtil(load_selenium=True, headless_flag=False, driver=None)
            # BUG FIX: the fallback used to overwrite ``url`` with a
            # hard-coded article URL; fetch the page that was requested.
            browser.open_url(URL=url)
            time.sleep(15)  # give dynamic content time to render
            return browser.source_code()

    def sanitize_filename(self, filename):
        """Remove characters that are illegal in Windows/Unix file names."""
        illegal_chars = r'[\\/:*?"<>|]'
        return re.sub(illegal_chars, '', filename)

    def generate_hexo_front_matter(self, title, date, categories, tags):
        """Build the Hexo Front Matter header for an article.

        ``categories`` / ``tags`` are single strings; empty values are omitted.
        """
        front_matter = f"---\ntitle: {title}\ndate: {date}\n"
        if categories:
            front_matter += f"categories:\n- {categories}\n"
        if tags:
            front_matter += f"tags:\n- {tags}\n"
        front_matter += "---\n"
        return front_matter

    def html2md(self, page, group=None):
        """Convert a CSDN article page to Markdown and save it in Hexo format.

        ``group`` (optional) names a sub-directory of ``self.save_dir``.
        Returns True on success, False on any conversion/IO error.
        """
        try:
            soup = BeautifulSoup(page, 'html.parser')
            content = soup.find(id="content_views")
            title_article = soup.find(attrs={'class': 'title-article'})
            file_name = self.sanitize_filename(title_article.get_text().strip())
            # Extract the Hexo Front Matter metadata from the page.
            date = soup.find(attrs={'class': 'bar-content'}).find(attrs={'class': 'time'}).get_text().strip()
            categories = ''
            tags = ''
            try:
                categories = soup.find(attrs={'class': 'tags-box space'}).find(
                    attrs={'class': 'tag-link'}).get_text().strip()
            except AttributeError:
                logging.warning("No categories found.")
            try:
                tags = soup.find(attrs={'class': 'tags-box artic-tag-box'}).get_text().strip().split('：')[1]
            except (AttributeError, IndexError):
                logging.warning("No tags found.")
            hexo_front_matter = self.generate_hexo_front_matter(file_name, date, categories, tags)

            # Preserve font colours so they survive the Markdown conversion.
            for tag in content.find_all(True):
                if tag.name == 'font' and tag.has_attr('color'):
                    # <font color="red"> style tags
                    color = tag['color']
                    tag.insert_before(f'<span style="color:{color}">')
                    tag.insert_after('</span>')
                elif tag.has_attr('style') and 'color' in tag['style']:
                    # inline style="color:red" attributes
                    style = tag['style']
                    color = style.split('color:')[1].split(';')[0].strip()
                    tag.insert_before(f'<span style="color:{color}">')
                    tag.insert_after('</span>')

            # HTML -> Markdown.
            text_maker = html2text.HTML2Text()
            text_maker.ignore_links = False   # keep links
            text_maker.bypass_tables = False  # keep tables
            text_maker.ignore_images = False  # keep images
            text_maker.body_width = 0         # no hard line wrapping
            markdown_content = text_maker.handle(content.prettify())

            # BUG FIX: ``save_dir`` was only assigned when ``group`` was
            # non-empty, raising NameError (swallowed by the outer except)
            # for every group-less download; default to self.save_dir.
            save_dir = os.path.join(self.save_dir, group) if group else self.save_dir
            os.makedirs(save_dir, exist_ok=True)
            file_path = os.path.join(save_dir, f"{file_name}.md")
            with codecs.open(file_path, 'w', encoding='utf-8') as f:
                f.write(hexo_front_matter)
                f.write(markdown_content)
            logging.info(f"File saved successfully: {file_path}")
            return True
        except Exception as e:
            logging.error(f"Error converting HTML to Markdown: {e}")
            return False

    def down_blog_single(self, url, group=None):
        """Download one article and save it; returns html2md's success flag."""
        page_html = self.get_blog_content(url=url)
        return self.html2md(page=page_html, group=group)

    def get_article_urls(self, url):
        """Return the list of article hrefs found in a CSDN column page."""
        html = self.get_blog_content(url=url)
        a_tags = self.html_client.find_elements(html=html, xpath_exp="//ul[@class='column_article_list']//li//a")
        links = []
        for a_tag in a_tags:
            a_html = self.html_client.to_str(a_tag)
            href_value = self.html_client.find_element(html=a_html, xpath_exp='//a/@href')
            links.append(str(href_value))
        return links

    def get_article_urls_ofuser(self, url):
        """Return UrlLink objects for every column (group) on a user's page."""
        page = self.get_blog_content(url=url)
        a_tags = self.html_client.find_elements(
            html=page,
            xpath_exp="//div[@class='aside-common-box-content aside-box-fold']//ul//li//a")
        links = []
        for a_tag in a_tags:
            a_html = self.html_client.to_str(a_tag)
            href_value = self.html_client.find_element(html=a_html, xpath_exp='//a/@href')
            title = self.html_client.extarct_view_text(a_html)
            links.append(UrlLink(url=str(href_value), name=title))
        return links

if __name__ == '__main__':
    # Manual smoke test: download a single article into ./zh/.
    article_url = 'https://blog.csdn.net/qq_53324833/article/details/139023174'
    service = BlogService(save_dir='./')
    service.down_blog_single(url=article_url, group='zh')

