import requests
from bs4 import BeautifulSoup
import util
import os
from urllib.parse import urljoin  # 用于处理相对路径


def parse_content_zhbd_1(html, filePath, base_url):
    """Parse one article page and save it as a self-contained local HTML file.

    Extracts the title, publish time, body text and images from the page's
    ``div.xilan`` container, downloads every image into an ``img/`` directory
    (a sibling of *filePath*), and writes ``<title>-<publish_time>.html``
    into *filePath* with text and images in their original order.

    Args:
        html: Raw HTML of the article detail page.
        filePath: Directory into which the generated HTML file is written.
        base_url: Site root used to resolve relative image URLs.
    """
    # Local import: the 'html' parameter shadows the stdlib 'html' module name.
    from html import escape

    soup = BeautifulSoup(html, 'lxml')

    # Everything we need lives inside the outer div.xilan wrapper.
    xilan_div = soup.find('div', class_='xilan')
    if not xilan_div:
        print("未找到 class='xilan' 的 div 标签")
        return

    # Title: search for the h2 unconditionally.  The original only looked for
    # it when the div.channal_style header existed, which dropped valid titles
    # on pages without that wrapper.
    title = "无标题"
    h2 = xilan_div.find('h2', class_='xilan_title')
    if h2:
        title = h2.get_text(strip=True)

    # Publish time sits in the channel header, when that header is present.
    publish_time = "未知时间"
    title_div = xilan_div.find('div', class_='channal_style')
    time_p = title_div.find('p', class_='channal_color01') if title_div else None
    if time_p:
        publish_time = time_p.get_text(strip=True)

    # Walk the article body, keeping text and images in document order.
    content_xilan = xilan_div.find('div', class_='content_xilan', id='zoom')
    content_items = []  # ordered mix of ('text', str) and ('img', abs_url)
    image_paths = []    # unique image URLs, in first-seen order

    if content_xilan:
        for child in content_xilan.find_all(recursive=True):
            # Bug fix: only collect text from "leaf" containers.  The original
            # appended text for every div AND every p, so a div wrapping
            # several paragraphs emitted the whole passage once for the div
            # and again for each nested p (duplicated output).
            if child.name in ('div', 'p') and not child.find(['div', 'p']):
                text = child.get_text(separator='', strip=True).replace('\xa0', ' ').strip()
                if text:
                    content_items.append(('text', text))

            # Images may be nested anywhere; resolve relative src values
            # against the site root so downloads work.
            if child.name == 'img' and child.get('src'):
                img_abs_url = urljoin(base_url, child['src'])
                if img_abs_url not in image_paths:
                    image_paths.append(img_abs_url)
                    content_items.append(('img', img_abs_url))
                    print(f"发现图片: {img_abs_url}")

    # Download every image and map its remote URL to the local file path.
    img_dir = os.path.join(os.path.dirname(filePath), 'img')
    os.makedirs(img_dir, exist_ok=True)
    img_url_to_local = {}
    for img_url in image_paths:
        try:
            local_path = util.download_and_save_image(img_url, img_dir)
            if local_path:
                img_url_to_local[img_url] = local_path
                print(f"图片下载成功: {img_url} → {local_path}")
            else:
                print(f"警告：图片下载失败: {img_url}")
        except Exception as e:
            print(f"下载图片出错 {img_url}: {str(e)}")

    # Build the output HTML.  Scraped text is escaped so stray '<' or '&'
    # in the source page cannot break the generated markup.
    html_content = f"""<html>
    <head>
        <meta charset="UTF-8">
        <title>{escape(title)}</title>
        <style>
            body {{ font-family: SimSun, "宋体", serif; line-height: 1.8; padding: 20px; }}
            p {{ margin: 10px 0; text-indent: 2em; }}
            img {{ display: block; margin: 20px auto; max-width: 80%; }}
        </style>
    </head>
    <body>
        <h2>{escape(title)}</h2>
        <p style="color: #666;">{escape(publish_time)}</p>
    """

    # Emit text and images in their original document order.
    for item_type, item_content in content_items:
        if item_type == 'text':
            html_content += f"<p>{escape(item_content)}</p>\n"
        elif item_type == 'img':
            local_path = img_url_to_local.get(item_content)
            if local_path:
                # Bug fix: the HTML file is written *inside* filePath, so the
                # image path must be relative to filePath itself.  The original
                # used dirname(filePath), producing 'img/...' where the actual
                # location relative to the HTML file is '../img/...'.
                relative_path = os.path.relpath(local_path, filePath)
                # Normalise Windows separators for the browser.
                relative_path = relative_path.replace('\\', '/')
                html_content += f'<img src="{relative_path}" style="max-width:80%;">\n<br>\n'
            else:
                html_content += f'<p>【图片缺失：{item_content}】</p>\n'

    html_content += """
    </body>
    </html>
    """

    # Save the HTML file; fall back to minimal character replacement when the
    # project util module has no sanitiser.
    filename = f"{title}-{publish_time}.html"
    filename = util.sanitize_filename(filename) if hasattr(util, 'sanitize_filename') else filename.replace(':', '-').replace('/', '-')
    file_path = os.path.join(filePath, filename)
    os.makedirs(filePath, exist_ok=True)

    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(html_content)

    print(f"HTML文件已保存至: {file_path}")


def _save_link_page(filePath, title, time_text, href):
    """Write a stub HTML page that links out to an external article.

    Used for entries (WeChat posts, other off-site links) whose content is not
    scraped locally.  Returns the path of the file that was written.
    """
    html_content = f"""<html>
                                <head>
                                    <meta charset="UTF-8">
                                    <title>{title}</title>
                                </head>
                                <body>
                                    <p><strong>发布时间:</strong>  {time_text}</p>
                                    <p><strong>正文链接:</strong> <a href="{href}" target="_blank">{href}</a></p>
                                </body>
                                </html>"""
    filename = f"{title}-{time_text}.html"
    # Fall back to minimal character replacement when util has no sanitiser.
    filename = util.sanitize_filename(filename) if hasattr(util, 'sanitize_filename') else filename.replace(':', '-').replace('/', '-')
    file_path = os.path.join(filePath, filename)
    os.makedirs(filePath, exist_ok=True)
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(html_content)
    return file_path


# Comprehensive-reports ("综合报道") listing page.
def parse_content_zhbd(html, localPath, filePathname):
    """Parse a listing page and process every article entry it contains.

    On-site links (``/cmstd/...``) are fetched and fully scraped via
    :func:`parse_content_zhbd_1`; WeChat and any other external links get a
    stub HTML page containing the publish time and the original URL.

    Args:
        html: Raw HTML of the listing page.
        localPath: Root output directory.
        filePathname: Sub-directory name for this listing's output.
    """
    # Site root, used to resolve relative article and image URLs.
    base_url = 'http://www.cmstd.com.cn'
    filePath = os.path.join(localPath, filePathname)
    soup = BeautifulSoup(html, 'lxml')

    ul_element = soup.find('ul', class_='list_mod2', opentype='page')
    if not ul_element:
        print("未找到 class='list_mod2' 的 ul 标签")
        return

    for li in ul_element.find_all('li'):
        a_tag = li.find('a')
        if not a_tag:
            continue  # skip list items with no link
        href = a_tag.get('href')
        if not href:
            # Bug fix: an <a> without an href returned None, which crashed
            # on href.startswith(...) with a TypeError.
            continue
        title = a_tag.get('title') or a_tag.text.strip()

        time_span = li.find('span', class_='fr')
        time_text = time_span.text.strip() if time_span else None

        if href.startswith('/cmstd/'):
            # On-site article: fetch with an absolute URL and scrape fully.
            full_url = urljoin(base_url, href)
            print(f"抓取页面: {full_url}")
            html_content = util.fetch_page_content(full_url)
            if html_content:
                # base_url is forwarded so image paths resolve correctly.
                parse_content_zhbd_1(html_content, filePath, base_url)
            print(f"标题: {title},{filePathname}链接: {full_url}, 时间: {time_text}")
        elif href.startswith('https://mp.weixin.qq.com/'):
            # WeChat article: only a link stub is stored.
            file_path = _save_link_page(filePath, title, time_text, href)
            print(f"生成微信链接页面: {file_path}")
        else:
            # Any other external link: same stub treatment.
            file_path = _save_link_page(filePath, title, time_text, href)
            print(f"生成其他链接页面: {file_path}")