import requests
from bs4 import BeautifulSoup
import mysql.connector

# Fetch a web page and return its decoded text.
def fetch_page(url):
    """Fetch *url* and return the response body as text.

    Returns None on any request failure (connection error, timeout,
    or HTTP error status) so callers can skip the page gracefully.
    """
    try:
        # Timeout keeps the crawler from hanging forever on a dead host.
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # treat 4xx/5xx responses as failures too
        response.encoding = response.apparent_encoding  # guess encoding from body
        return response.text
    except requests.RequestException as e:
        print(f"请求失败: {e}")
        return None

# Parse the page and extract the <li> tags under a specific <ul>.
def parse_li_tags(page_content, ul_id):
    """Return all <li> tags inside the <ul> whose id is *ul_id*.

    Returns an empty list when no such <ul> exists (error page or
    changed layout) — the original raised AttributeError in that case.
    """
    soup = BeautifulSoup(page_content, 'html.parser')
    ul_tag = soup.find('ul', id=ul_id)  # locate the target <ul> by id
    if ul_tag is None:
        return []
    return ul_tag.find_all('li')

def get_time(input_string):
    """Return the trailing timestamp embedded in a headline string.

    The last 14 characters are expected to be a parenthesised timestamp
    such as ``(12月01日 08:30)``; the surrounding parentheses are removed.
    """
    return input_string[-14:].strip('()')

# Open a connection to the MySQL database.
def connect_to_db():
    """Return a new connection to the local ``stock`` MySQL database."""
    # NOTE(review): credentials are hard-coded; consider env vars/config.
    db_config = {
        "host": "127.0.0.1",   # database host
        "port": 3306,
        "user": "root",        # database user
        "password": "123456",  # database password
        "database": "stock",   # schema name
    }
    return mysql.connector.connect(**db_config)

# Persist scraped rows into the database.
def store_data_to_db(brief, href, time, details):
    """Insert scraped hot-topic rows into the ``hot_topics`` table.

    The four arguments are parallel lists; row *i* is
    ``(brief[i], href[i], time[i], details[i])``. Commits once after all
    inserts. The connection is closed even if an insert fails (the
    original leaked it on error), and the loop no longer shadows the
    parameter names.
    """
    conn = connect_to_db()
    try:
        cursor = conn.cursor()
        # Batch all rows in a single executemany call instead of one
        # execute per row.
        cursor.executemany(
            "INSERT INTO hot_topics (hot_brief, hot_href,hot_time,hot_details) VALUES (%s, %s, %s, %s)",
            list(zip(brief, href, time, details)),
        )
        # Commit the transaction, then release resources.
        conn.commit()
        cursor.close()
    finally:
        conn.close()

# Crawl the article page behind a headline link and extract its paragraphs.
def crawl_hot_details(href, id):
    """Fetch the article at *href* and return its <p> tags.

    Looks for the <div> with the given *id* first, then falls back to
    <section class="art_content">. Returns [] when the page cannot be
    fetched or neither container exists (the original raised
    UnboundLocalError in that case).
    """
    hot_details = fetch_page(href)
    if hot_details is None:
        # Fetch failed — nothing to parse.
        return []
    soup = BeautifulSoup(hot_details, 'html.parser')
    div_tag = soup.find('div', id=id)  # primary article container
    if div_tag:
        p_tags = div_tag.find_all('p')
    else:
        # Fallback layout used by some article pages.
        section_tag = soup.find('section', class_='art_content')
        p_tags = section_tag.find_all('p') if section_tag else []
    print(p_tags)
    return p_tags

# Main crawl loop: fetch each listing page and collect rows.
def crawl_pages(base_url, total_pages=5):
    """Crawl *total_pages* listing pages and collect hot-topic rows.

    Returns four parallel lists ``(brief, href, time, details)``.

    Fixes vs the original:
    - The accumulator lists are initialised ONCE before the page loop;
      the original reset them on every iteration, so with
      ``total_pages > 1`` only the last page's data was returned (and a
      failed first request left them undefined).
    - ``brief`` now stores the headline WITHOUT the trailing 14-char
      timestamp (``text[:-14]``); the original appended ``text[-14:]``,
      duplicating the timestamp already captured by ``get_time``.
    """
    brief = []
    href = []
    time = []
    details = []
    for page_num in range(1, total_pages + 1):
        # Pagination is expressed as a &page=N query parameter.
        url = f"{base_url}&page={page_num}"
        print(f"正在爬取：{url}")
        page_content = fetch_page(url)
        if not page_content:
            print("无法获取页面内容，跳过此页")
            continue
        li_tags = parse_li_tags(page_content, 'listcontent')
        for li in li_tags:
            a_tag = li.find('a')  # first link inside the list item
            # Skip items without a usable hyperlink.
            if not (a_tag and a_tag.get('href')):
                continue
            text = li.get_text(strip=True)  # plain text, tags removed
            p_tags = crawl_hot_details(a_tag.get('href'), 'artibody')
            details.append(str(p_tags) if p_tags else " ")
            brief.append(text[:-14])  # headline minus "(MM月DD日 HH:MM)"
            href.append(a_tag.get('href'))
            time.append(get_time(text))
    return brief, href, time, details

if __name__ == '__main__':
    # Entry point: crawl the Sina finance news roll and persist the rows.
    base_url = 'https://finance.sina.com.cn/roll/index.d.html?fid=&cid=56588'
    # Number of listing pages to crawl.
    scraped = crawl_pages(base_url, total_pages=1)
    store_data_to_db(*scraped)
    print("数据已成功存入数据库")

