import os
import re

import pandas as pd
import requests
from tqdm import tqdm


def sanitize_filename(filename):
    """Normalize *filename* so it is safe on common filesystems.

    Characters illegal in Windows/Unix file names and all whitespace are
    mapped to underscores, runs of underscores are collapsed to a single
    one, and leading/trailing underscores are removed.
    """
    # Apply the substitutions in order: illegal chars, whitespace runs,
    # then collapse the underscore runs the first two passes produced.
    for pattern in (r'[\\/:*?"<>|]', r'\s+', r'_+'):
        filename = re.sub(pattern, '_', filename)
    return filename.strip('_')


def download_file(url,
                  save_dir,
                  filename=None,
                  show_progress=True,
                  timeout=10):
    """Download a single file into ``save_dir``.

    :param url: download URL.
    :param save_dir: target directory (created automatically if missing).
    :param filename: explicit file name; defaults to the last URL path
        segment. Sanitized via :func:`sanitize_filename` either way.
    :param show_progress: show a tqdm progress bar when the server
        reports a Content-Length.
    :param timeout: request timeout in seconds.
    :return: path of the saved file (also when it already existed), or
        ``None`` when the request failed.
    """
    os.makedirs(save_dir, exist_ok=True)  # ensure the target directory exists

    if filename is None:
        filename = url.split('/')[-1]  # fall back to the URL's last segment
    filename = sanitize_filename(filename)
    save_path = os.path.join(save_dir, filename)
    if os.path.exists(save_path):
        print(f'文件已存在: {save_path}')
        return save_path

    # Stream into a temporary ".part" file first: an interrupted download
    # must never leave a truncated file at save_path, because the existence
    # check above would treat it as complete on the next run.
    tmp_path = save_path + '.part'
    try:
        with requests.get(url, stream=True, timeout=timeout) as response:
            response.raise_for_status()  # fail fast on HTTP error status
            total_size = int(response.headers.get('content-length', 0))

            with open(tmp_path, 'wb') as file:
                if show_progress and total_size > 0:
                    with tqdm(
                            total=total_size,
                            unit='B',
                            unit_scale=True,
                            unit_divisor=1024,
                            desc=filename) as bar:
                        for chunk in response.iter_content(1024):
                            file.write(chunk)
                            bar.update(len(chunk))
                else:
                    for chunk in response.iter_content(1024):
                        file.write(chunk)

        os.replace(tmp_path, save_path)  # atomically publish the finished file
        print(f'文件已成功下载: {save_path}')
        return save_path
    except requests.exceptions.RequestException as e:
        print(f'下载失败: {e}')
        # Drop the partial download so it cannot be mistaken for a
        # complete file later.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        return None


# --- Script body: download every annual-report PDF listed in 企业年报.csv ---
pd_data = pd.read_csv('企业年报.csv')
print(pd_data.shape)
pd_data = pd_data.drop_duplicates()  # drop fully duplicated rows
print(pd_data.shape)

# Keep a single row per attachment URL so each report downloads once.
df_unique = pd_data.drop_duplicates(subset=['adjunctUrl'], keep='first')
print(df_unique.shape)

# Iterate in reverse order; download_file skips files that already exist,
# so the script can be re-run to resume an interrupted batch.
for index, row in tqdm(df_unique[::-1].iterrows(), total=df_unique.shape[0]):
    if '摘要' in row['announcementTitle']:
        continue  # skip report summaries, keep only the full reports
    announcementId = row['announcementId']
    url = f'http://www.cninfo.com.cn/new/announcement/download?bulletinId={announcementId}'
    name = f"{row['secName']}_{row['announcementTitle']}.pdf"
    download_file(url, 'data', filename=name)
