import requests
from bs4 import BeautifulSoup
import os
import csv
import re

# Number of articles processed so far; incremented once per article below.
rounds = 0

# Output locations: one CSV of statistics plus per-article txt files.
save_dir = r'C:\Users\曾宪树\Desktop\2240232246曾宪树\src'
csv_file_path = r'C:\Users\曾宪树\Desktop\2240232246曾宪树\src\统计.csv'
# The article bodies are written into this txt directory (see the
# f'...\txt\{rounds}.txt' path further down). The original script never
# created it, so the very first article write failed with FileNotFoundError.
txt_save_dir = r'C:\Users\曾宪树\Desktop\2240232246曾宪树\txt'

# exist_ok=True: idempotent on re-runs and immune to the check-then-create race.
os.makedirs(save_dir, exist_ok=True)
os.makedirs(txt_save_dir, exist_ok=True)

# Open the stats CSV and stream one row per scraped article into it.
# Columns: sequence number, publish time, character count, title.
with open(csv_file_path, 'w', encoding='utf-8', newline='') as csv_file:
    fieldnames = ['序号', '时间', '字数', '标题']
    csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    csv_writer.writeheader()

    # Build the three listing-page URLs (pages 1..3 of the news index).
    list_url_prefix = 'https://news.seig.edu.cn/cms/news/1/p/'
    domain = 'https://news.seig.edu.cn'
    page_urls = ['{}{}.html'.format(list_url_prefix, page) for page in range(1, 4)]

    # Per-article txt files go here; create it up front so the open()
    # below cannot fail with FileNotFoundError (the original never
    # created this directory).
    txt_dir = r'C:\Users\曾宪树\Desktop\2240232246曾宪树\txt'
    os.makedirs(txt_dir, exist_ok=True)

    # One shared header dict; the value is kept byte-identical to the
    # original (note: the typo "Mircrosoft" is what the server receives).
    hd = {'user-agent': 'Mircrosoft Edge'}

    for page_url in page_urls:
        print(page_url)  # listing page being scraped

        # timeout: without it a stalled server hangs the script forever.
        r = requests.get(page_url, headers=hd, timeout=10)
        r.encoding = 'utf-8'

        soup = BeautifulSoup(r.text, 'html.parser')
        for div in soup.find_all('div', class_='media-body'):
            # Article links are site-relative; prepend the domain.
            title_url = domain + div.h3.a['href']
            print(title_url)

            rounds += 1  # article counter (also used as the txt filename)

            # Fetch the article page ONCE; the original fetched the same
            # URL twice and parsed it twice.
            resp = requests.get(title_url, headers=hd, timeout=10)
            resp.encoding = 'utf-8'
            article_soup = BeautifulSoup(resp.text, 'html.parser')

            # Title: guard against a missing article-title div — the
            # original called .get_text() on a possible None.
            title_div = article_soup.find('div', class_='article-title')
            title_text = title_div.get_text() if title_div else ''

            # Default to '' so the CSV row below never references an
            # undefined variable when the markup is absent (the original
            # raised NameError in that case).
            publish_time = ''
            info_div = article_soup.find('div', class_='article-info text-muted')
            if info_div:
                ul_list = info_div.find_all('ul', class_='list-unstyled list-inline')
                if ul_list:
                    li_list = ul_list[0].find_all('li')
                    # The publish time is li index 4, which requires at
                    # least 5 items; the original guard of >= 4 was an
                    # off-by-one that allowed an IndexError.
                    if len(li_list) > 4:
                        publish_time = li_list[4].get_text(strip=True).replace('发布时间：', '')
                    else:
                        print("发布时间信息不完整")
                else:
                    print("未找到 ul 元素")
            else:
                print("未找到 article-info text-muted 元素")

            # Whole-page text. The count strips whitespace only; despite
            # the printed message, punctuation is NOT removed by \s+.
            text_content = article_soup.get_text()
            total_word_count = len(re.sub(r'\s+', '', text_content))

            # Save the article: title line, publish time (when found),
            # then the page text.
            txt_file_path = f'{txt_dir}\\{rounds}.txt'
            with open(txt_file_path, 'w', encoding='utf-8') as txt_file:
                txt_file.write(title_text + '\n')
                if publish_time:
                    txt_file.write(publish_time + '\n')
                txt_file.write(text_content)

            print(f"文件保存成功！整个页面的字数（排除空格、标点符号或换行符等）：{total_word_count} 字")

            # One CSV row per article.
            csv_writer.writerow({'序号': rounds, '时间': publish_time,
                                 '字数': total_word_count, '标题': title_text})

print("CSV 文件保存成功！")