# import requests
# from bs4 import BeautifulSoup
#
#
# def fetch_website_data(url):
#     try:
#         # 发送HTTP GET请求
#         response = requests.get(url)
#         response.encoding = 'utf-8'
#         print('平安银行' in response)
#         # 检查请求是否成功
#         if response.status_code == 200:
#             # 使用BeautifulSoup解析HTML
#             soup = BeautifulSoup(response.text, 'html.parser')
#             print(soup)
#             # 假设我们要找的是<title>标签内的内容
#             title = soup.title.text
#             print(f"Title: {title}")
#         else:
#             print("Failed to retrieve data")
#     except requests.RequestException as e:
#         print(e)
#
#     # 示例网站URL
#
#
# url = 'https://www.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&CATALOGID=1837_xxpl&txtDate=2024-07-05&txtZqdm=300251&random=0.1386412947192177'
# fetch_website_data(url)


import requests


def download_file(url, file_name, chunk_size=8192, timeout=60):
    """Download a file from *url* and save it to *file_name*.

    The response is streamed so that large files are never held in
    memory all at once.

    :param url: URL of the file to download.
    :param file_name: path the downloaded content is written to.
    :param chunk_size: bytes to read per chunk (default 8192).
    :param timeout: seconds to wait for the server; a ``requests`` call
        without a timeout can block forever.
    :raises requests.HTTPError: if the server answers with a 4xx/5xx status.
    :raises requests.Timeout: if the server does not respond in time.
    """
    # `with` closes the underlying connection even on error — required
    # for streamed responses, which otherwise hold the socket open.
    with requests.get(url, stream=True, timeout=timeout) as response:
        # Fail fast on an unsuccessful status code.
        response.raise_for_status()

        # The with-statement guarantees the file handle is closed.
        with open(file_name, 'wb') as file:
            for chunk in response.iter_content(chunk_size=chunk_size):
                if chunk:  # skip empty keep-alive chunks
                    file.write(chunk)


# Report endpoints on the SZSE (Shenzhen Stock Exchange) API.
# Only `urr` is downloaded below; the other URLs are kept for reference
# (same report on different dates / JSON variant of the endpoint).
url = 'https://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1837_xxpl&txtDate=2024-07-08&tab2PAGENO=1&random=0.9970942990905536&TABKEY=tab2'
urr = 'https://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1837_xxpl&txtDate=2024-07-05&tab2PAGENO=1&random=0.9970942910905536&TABKEY=tab2'
uaa = 'https://www.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&CATALOGID=1837_xxpl&txtDate=2024-07-05&random=0.05247811576148975'
uuu = 'https://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1837_xxpl&txtDate=2024-07-07&txtZqdm=300251&random=0.6526820338078219&TABKEY=tab2'
file_name = 'downloaded_file-urr.xlsx'

if __name__ == '__main__':
    # Guard the download so importing this module does not trigger
    # network I/O as a side effect.
    download_file(urr, file_name)