import os
import time

import requests
from bs4 import BeautifulSoup, Comment


def download_html(url, timeout=10):
    """Fetch *url* and return the response body as text.

    Args:
        url: The page URL to download.
        timeout: Seconds to wait for the server before giving up
            (default 10).  The original call had no timeout and could
            hang the whole script indefinitely.

    Returns:
        The decoded response body on HTTP 200, otherwise ``None``.
    """
    try:
        response = requests.get(url, timeout=timeout)
    except requests.RequestException as exc:
        # DNS errors, refused connections and timeouts previously raised
        # and aborted the entire run; report the failure and skip instead.
        print(f"Failed to download HTML from {url}: {exc}")
        return None
    if response.status_code == 200:
        return response.text
    print(f"Failed to download HTML from {url}")
    return None


def parse_html(html):
    """Extract the visible text content from an HTML document.

    Walks every text node in the parsed tree and keeps those whose parent
    is a visible element, then concatenates the stripped fragments.

    Args:
        html: Raw HTML markup as a string.

    Returns:
        The concatenated visible text (fragments are joined with no
        separator, matching the original output format).
    """
    soup = BeautifulSoup(html, 'html.parser')
    hidden_parents = {'style', 'script', 'head', 'title'}
    fragments = []
    for node in soup.find_all(string=True):
        # Bug fix: HTML comments are NavigableStrings too, so the original
        # filter leaked comment text into the output. Skip them explicitly.
        if isinstance(node, Comment):
            continue
        if node.parent.name in hidden_parents:
            continue
        fragments.append(node.strip())
    return ''.join(fragments)


def write_to_txt(filename, content):
    """Save *content* to *filename* as UTF-8 text, overwriting any existing file."""
    with open(filename, mode='w', encoding='utf-8') as out_file:
        out_file.write(content)


def main():
    """Download each bid-detail page, extract its text, and save it to a TXT file."""
    # URLs of the HTML pages to scrape.
    urls = [
        "http://api.bidcenter.com.cn/custom/1277449/Detail.aspx?id=4db4637b0b377a88f9cd13a91d5b8dc715e21bcdc5ce6b98",
        "http://api.bidcenter.com.cn/custom/1277449/Detail.aspx?id=5f9cbdf217671e3ebb658a1567d52a1f4e2452b5706c1ac5",
        "http://api.bidcenter.com.cn/custom/1277449/Detail.aspx?id=bf801038327631fbd7e91db2cbdb8f3b62af32ea2b02f1b1",
        "http://api.bidcenter.com.cn/custom/1277449/Detail.aspx?id=70be9dc9f1be1f6c595070f48e8ee3625a8b74d2aee17ec2",
        "http://api.bidcenter.com.cn/custom/1277449/Detail.aspx?id=f7e3bef58f05bd1ed8166c3c8ba4d73db9dabb77026d304a"
    ]
    # Folder where the TXT files are written.  Raw string so the
    # backslashes in the Windows path can never be read as escape
    # sequences (the value is unchanged, but the literal is now safe).
    output_folder = r'D:\item\langchainDemo\charomaDb\data\paul_graham'
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(output_folder, exist_ok=True)

    for index, url in enumerate(urls):
        html = download_html(url)

        if html:
            content = parse_html(html)
            filename = os.path.join(output_folder, f'page_{index + 1}.txt')
            write_to_txt(filename, content)
            # Bug fix: the message previously printed a literal placeholder
            # instead of the destination file name.
            print(f"Content from {url} has been written to {filename}")


# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()
