from bs4 import BeautifulSoup

import util
import os



# 国资要闻
# 国资要闻 (SASAC news) listing-page parser
def parse_content_gzyw(html, localPath):
    """Parse the news listing page and persist every entry locally.

    On-site article links (prefix ``/cctgroup/xwzx``) are fetched and handed
    to :func:`parse_content_gzyw_1` for full extraction; every other entry is
    saved as a small HTML stub recording title, time and the external link.

    Args:
        html: Raw HTML of the listing page.
        localPath: Root directory; output goes into a '国资要闻' subfolder.
    """
    filePath = os.path.join(localPath, '国资要闻')
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(filePath, exist_ok=True)
    soup = BeautifulSoup(html, 'lxml')
    # The article list lives in <ul class="list_mod2" opentype="page">.
    ul_element = soup.find('ul', class_='list_mod2', opentype='page')
    if not ul_element:
        return

    for li in ul_element.find_all('li'):
        # Guard clause: without an <a> there is no link or title for this
        # entry. The original code left `href`/`title` unbound (NameError on
        # the first such <li>, stale values afterwards) — skip instead.
        a_tag = li.find('a')
        if not a_tag:
            continue
        href = a_tag.get('href')  # article link
        title = a_tag.get('title') or a_tag.text.strip()  # article title
        if not href:
            # No href attribute: `.startswith` below would crash on None.
            continue

        # Publication time sits in <span class="fr">.
        time_span = li.find('span', class_='fr')
        time_text = time_span.text.strip() if time_span else None
        print("============================================================")
        # Relative on-site links point at full article pages: fetch & parse.
        if href.startswith('/cctgroup/xwzx'):
            html_content = util.fetch_page_content('http://www.cmst.com.cn' + href)
            if html_content:
                parse_content_gzyw_1(html_content, filePath)
        else:
            print(f"标题: {title}, 链接: {href}, 时间: {time_text}")
            # External/other links: save a stub page with the metadata only.
            html_content = f"""<html>
                <head>
                    <meta charset="UTF-8">
                    <title>{title}</title>
                </head>
                <body>
                    <h1>{title}</h1>
                    <p><strong>发布时间:</strong> {time_text}</p>


                    <h2>正文链接</h2>
                    <p><strong>正文链接:</strong> {href}</p>
                </body>
                </html>"""

            # Build a filesystem-safe file name and write the stub out.
            filename = util.sanitize_filename(f"{title}-{time_text}.html")
            file_path = os.path.join(filePath, filename)

            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(html_content)

            print(f"HTML文件已保存至: {file_path}")


def parse_content_gzyw_1(html, filePath):
    """Parse an article detail page and save it as a self-contained HTML file.

    Extracts title, source and publish time from the ``artTit`` header,
    walks the article body in document order (downloading images into an
    ``img`` subfolder), then writes the assembled page into *filePath*.

    Args:
        html: Raw HTML of the article detail page.
        filePath: Output directory; images are referenced by relative path
            so the saved page stays portable.
    """
    soup = BeautifulSoup(html, 'lxml')

    # Header info lives in <div class="artTit">.
    art_tit_div = soup.find('div', class_='artTit')
    title = "无标题"
    source = "未知来源"
    publish_time = "未知时间"

    if art_tit_div:
        # Title is the <h2> text.
        h2 = art_tit_div.find('h2')
        title = h2.get_text(strip=True) if h2 else "无标题"

        # Source and publish time follow the first/second <span> labels.
        spans = art_tit_div.find_all('span')

        if len(spans) > 0:
            next_source = spans[0].next_sibling
            # next_sibling may be a Tag (no .strip); accept only text nodes
            # (NavigableString subclasses str).
            if isinstance(next_source, str) and next_source.strip():
                source = next_source.strip()
            else:
                # BUG FIX: the original called find_next(text=True, strip=True).
                # bs4 treats the unknown strip=True kwarg as an attribute
                # filter, and a text search with attribute filters never
                # matches a text node — the fallback always returned None,
                # clobbering the default. Search plainly and strip here.
                found = spans[0].find_next(string=True)
                if found and found.strip():
                    source = found.strip()

        if len(spans) > 1:
            next_time = spans[1].next_sibling
            if isinstance(next_time, str) and next_time.strip():
                publish_time = next_time.strip()
            else:
                # Same find_next(strip=True) fix as for `source` above.
                found = spans[1].find_next(string=True)
                if found and found.strip():
                    publish_time = found.strip()

    # Body content: ('text', paragraph) / ('img', local_path) in page order.
    content_div = soup.find('div', class_='content')
    content_items = []

    if content_div:
        w1000_div = content_div.find('div', class_='w1000')
        if w1000_div:
            # Only direct children, so the original document order is kept.
            for child in w1000_div.find_all(recursive=False):
                if child.name == 'p':
                    # A <p> either wraps an image or carries a text paragraph.
                    img_tag = child.find('img')
                    if img_tag and img_tag.get('src'):
                        img_url = img_tag['src']
                        img_path = util.download_and_save_image(img_url, os.path.join(filePath, 'img'))
                        if img_path:
                            content_items.append(('img', img_path))
                    else:
                        text = child.get_text(strip=True)
                        if text:
                            content_items.append(('text', text))
                elif child.name == 'img':
                    # Bare <img> tags may lazy-load via data-src.
                    print("发现 img 标签，原始HTML为:", str(child))
                    img_url = child.get('src') or child.get('data-src')
                    if img_url:
                        img_path = util.download_and_save_image(img_url, os.path.join(filePath, 'img'))
                        if img_path:
                            content_items.append(('img', img_path))
                    else:
                        print("警告：发现 <img> 标签但无法提取 src 或 data-src")

    # Log what was extracted.
    print(f"标题: {title}")
    print(f"文章来源: {source}")
    print(f"发布时间: {publish_time}")
    print("\n正文内容：")
    for item_type, content in content_items:
        if item_type == 'text':
            print(content)
        elif item_type == 'img':
            print(f"图片路径: {content}")

    # Assemble the output HTML page.
    html_content = f"""<html>
    <head>
        <meta charset="UTF-8">
        <title>{title}</title>
    </head>
    <body>
        <h1>{title}</h1>
        <p><strong>发布时间:</strong> {publish_time}</p>
        <p><strong>文章来源:</strong> {source}</p>
        <h2>正文内容</h2>
    """

    for item_type, content in content_items:
        if item_type == 'text':
            html_content += f"<p>{content}</p>\n"
        elif item_type == 'img':
            # Relative path keeps the saved page portable with its img folder.
            rel_img_path = os.path.relpath(content, filePath)
            html_content += f'<img src="{rel_img_path}" style="max-width:600px;"><br><br>\n'

    html_content += """
    </body>
    </html>
    """

    # Build a filesystem-safe file name and write the page out.
    filename = util.sanitize_filename(f"{title}-{publish_time}.html")
    file_path = os.path.join(filePath, filename)

    os.makedirs(filePath, exist_ok=True)

    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(html_content)

    print(f"HTML文件已保存至: {file_path}")

