import requests
from bs4 import BeautifulSoup
import os
from urllib.parse import urljoin
import time


def fetch_and_generate_html(url, output_dir):
    """Fetch company pages linked from *url* and write one HTML file per company.

    Scrapes the listing page for article links inside
    ``<ul class="list_mod5 list_mod4" opentype="page">``, visits each linked
    page, extracts the paragraph text from
    ``<div class="content_xilan" id="zoom">``, and renders each article into
    a standalone, styled HTML file under *output_dir*.

    Args:
        url: Listing page URL to scrape.
        output_dir: Directory for the generated HTML files (created if missing).

    Returns:
        None. Progress and errors are reported via ``print``; per-page
        failures are logged and skipped rather than aborting the run.
    """
    try:
        # Make sure the output directory exists before writing any files.
        os.makedirs(output_dir, exist_ok=True)

        # 1. Fetch the main listing page.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36"
        }
        response = requests.get(url, headers=headers, timeout=10)
        # Site serves Chinese content; let requests guess the real encoding.
        response.encoding = response.apparent_encoding
        main_soup = BeautifulSoup(response.text, 'lxml')

        # 2. Extract all article links from the target <ul>.
        target_ul = main_soup.find('ul', class_='list_mod5 list_mod4', opentype='page')
        if not target_ul:
            print("未找到目标ul标签 (class='list_mod5 list_mod4')")
            return

        table = target_ul.find('table')
        if not table:
            print("目标ul中未找到表格")
            return

        # Collect (title, absolute URL) pairs; urljoin resolves relative hrefs.
        link_info = []
        for a_tag in table.find_all('a', istitle="true"):
            title = a_tag.get('title', '无标题')
            full_link = urljoin(url, a_tag['href'])
            link_info.append((title, full_link))

        if not link_info:
            print("未提取到有效链接")
            return

        # 3. Visit each link, extract content, and write one HTML file each.
        generated = 0  # files actually written (per-page failures are skipped)
        for i, (title, link) in enumerate(link_info, 1):
            try:
                print(f"抓取中 ({i}/{len(link_info)}): {title}")
                sub_response = requests.get(link, headers=headers, timeout=15)
                sub_response.encoding = sub_response.apparent_encoding
                sub_soup = BeautifulSoup(sub_response.text, 'lxml')

                p_texts = _extract_paragraphs(sub_soup, title)

                # 4. Write a standalone HTML file for this company.
                output_path = os.path.join(output_dir, f"{_sanitize_filename(title)}.html")
                with open(output_path, 'w', encoding='utf-8') as f:
                    f.write(_build_html(title, p_texts, link))

                generated += 1
                print(f"已生成文件：{output_path}")

                # Throttle requests to be polite to the server.
                time.sleep(1)

            except Exception as e:
                # Best-effort per page: log the failure and continue with the rest.
                print(f"抓取 {title} 失败：{str(e)}")

        # Report the number of files actually written, not the number of
        # links found (previously over-counted when some pages failed).
        print(f"\n处理完成！共生成 {generated} 个HTML文件")
        print(f"文件保存目录：{output_dir}")

    except Exception as e:
        print(f"执行出错：{str(e)}")


# One-pass translation table: characters illegal in Windows filenames -> '-'.
_FILENAME_TRANS = str.maketrans({c: '-' for c in ':/\\*?"<>|'})


def _sanitize_filename(title):
    """Return *title* with Windows-illegal filename characters replaced by '-'."""
    return title.translate(_FILENAME_TRANS)


def _extract_paragraphs(sub_soup, title):
    """Return the non-empty <p> texts from the article container of *sub_soup*.

    Looks for ``<div class='content_xilan' id='zoom'>``; falls back to a
    single placeholder string when the container or any paragraph text is
    missing, so the caller always gets a non-empty list.
    """
    container = sub_soup.find('div', class_='content_xilan', id='zoom')
    if not container:
        print(f"警告：{title} 页面未找到 <div class='content_xilan' id='zoom'>")
        return ["未找到有效内容"]
    texts = [
        p.get_text(strip=True).replace('\xa0', ' ')  # normalize &nbsp;
        for p in container.find_all('p')
        if p.get_text(strip=True)
    ]
    return texts or ["该页面无p标签内容"]


def _build_html(title, p_texts, link):
    """Render the standalone HTML document for one company page."""
    html_content = f"""<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{title}</title>
    <style>
        body {{
            font-family: "SimSun", "宋体", serif;
            max-width: 1000px;
            margin: 0 auto;
            padding: 20px;
            line-height: 1.8;
        }}
        .container {{
            padding: 20px;
            border: 1px solid #e0e0e0;
            border-radius: 8px;
        }}
        h2 {{
            color: #2c3e50;
            border-bottom: 2px solid #3498db;
            padding-bottom: 8px;
            margin-top: 0;
        }}
        .p-content {{
            margin: 10px 0;
            text-indent: 2em;  /* 首行缩进 */
        }}
        .source-link {{
            color: #666;
            font-size: 0.9em;
            margin-top: 15px;
            padding-top: 10px;
            border-top: 1px dashed #ccc;
        }}
        .source-link a {{
            color: #3498db;
            text-decoration: none;
        }}
        .source-link a:hover {{
            text-decoration: underline;
        }}
    </style>
</head>
<body>
    <div class="container">
        <h2>{title}</h2>
"""
    # Body paragraphs, one styled <p> per extracted text.
    for text in p_texts:
        html_content += f"        <p class='p-content'>{text}</p>\n"

    # Footer with a link back to the source page.
    html_content += f"""        <div class='source-link'>来源：<a href='{link}' target='_blank'>{link}</a></div>
    </div>
</body>
</html>"""
    return html_content

if __name__ == "__main__":
    # Listing page of the CMSTD "company network" section to scrape.
    source_url = "http://www.cmstd.com.cn/cmstd/gyzc/gnwd/hydq/index.html"
    # Destination folder for the generated per-company HTML files.
    dest_dir = r"D:\aGetDataFrom111111\中储发展股份有限公司网站\01关于中储\05公司网络"
    fetch_and_generate_html(source_url, dest_dir)