import requests
from bs4 import BeautifulSoup
import markdownify 
import os
from llm import create_post

# Listing page to scrape; each entry on it links to a full article.
URL = "https://ai-bot.cn/ai-research/page/2/"
# Only the first few articles are processed per run.
MAX_ARTICLES = 3
# Network timeout in seconds — without one, requests.get() can hang forever.
REQUEST_TIMEOUT = 15


def _sanitize_title(title):
    """Return *title* with every non-alphanumeric character replaced by '-'.

    Makes the string safe to use as a directory and file name.
    """
    return "".join(c if c.isalnum() else "-" for c in title)


def _highlight_internal_links(container, soup):
    """Replace <a> tags whose href contains 'ai-bot.cn' with <strong> tags.

    Mutates *container* in place: the link text is kept (as bold text) and
    the hyperlink itself is dropped, so the generated markdown carries no
    internal cross-links back to the source site.
    """
    for anchor in container.find_all('a'):
        if 'ai-bot.cn' in anchor.get('href', ''):
            strong = soup.new_tag('strong')
            strong.string = anchor.text
            anchor.replace_with(strong)


def _save_article(link):
    """Fetch one article page, convert its body to markdown, and save it.

    Skips pages that lack the expected content panel or an <h1> title
    (the original code crashed with AttributeError on a missing <h1>).
    Raises requests.HTTPError on a non-2xx response.
    """
    print(f"正在处理的链接: {link}")
    response = requests.get(link, timeout=REQUEST_TIMEOUT)
    # Fail loudly on HTTP errors instead of converting an error page.
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')

    # Article body lives in a div with this exact class combination.
    panel_body = soup.find('div', class_='panel-body single mt-2')
    if panel_body is None:
        return
    title_tag = soup.find('h1')
    if title_tag is None:
        return

    _highlight_internal_links(panel_body, soup)

    # Page title doubles as directory/file name; replace characters that
    # are unsafe in file names with '-'.
    title = title_tag.text.strip()
    safe_title = _sanitize_title(title)

    markdown_content = markdownify.markdownify(str(panel_body))
    result = create_post(markdown_content)

    # One sub-directory per article, named after its sanitized title.
    os.makedirs(f'markdown_files/{safe_title}', exist_ok=True)
    with open(f'markdown_files/{safe_title}/{safe_title}.md', 'w', encoding='utf-8') as f:
        f.write(f"# {title}\n\n")
        f.write(result)
    print(f"已保存: {safe_title}.md")


def main():
    """Scrape the listing page and save the first few articles as markdown."""
    response = requests.get(URL, timeout=REQUEST_TIMEOUT)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')

    # Anchors with class 'media-content' point at individual articles.
    for entry in soup.find_all('a', class_='media-content')[:MAX_ARTICLES]:
        link = entry.get('href')
        if link:
            _save_article(link)


if __name__ == "__main__":
    main()