# NOTE(review): this entire file is commented-out (dead) code — a scraper that
# fetches a page from cern.ac.cn, extracts the first HTML <table>, and saves it
# to forest_data.csv. Best practice is to delete commented-out code (version
# control preserves it) or restore it behind an `if __name__ == "__main__":`
# guard; as written, importing or running this module does nothing.
# The Chinese annotation comments below have been translated to English; the
# dormant code itself, including its runtime strings, is left byte-identical.

# import requests
# from bs4 import BeautifulSoup
# import pandas as pd

# def scrape_forest_data(url):
#     # Set request headers to mimic a browser visit
#     headers = {
#         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
#     }
#     # Send the HTTP request
#     response = requests.get(url, headers=headers)
#     # Check whether the request succeeded
#     if response.status_code == 200:
#         # Parse the HTML content
#         soup = BeautifulSoup(response.text, 'html.parser')
        
#         # Look for table data (only the FIRST <table> on the page is used)
#         table = soup.find('table')
#         if table:
#             rows = table.find_all('tr')
#             data = []
#             for row in rows:
#                 cols = row.find_all('td')
#                 cols = [col.text.strip() for col in cols]
#                 data.append(cols)
#             # Convert to a DataFrame
#             df = pd.DataFrame(data)
#             return df
#         else:
#             print("未找到表格数据")
#             return None
#     else:
#         print(f"请求失败，状态码：{response.status_code}")
#         return None

# # Target URL
# url = 'https://www.cern.ac.cn/0index/index.asp'

# # Call the function to fetch the data
# forest_data = scrape_forest_data(url)

# # Save the data to a CSV file
# if forest_data is not None:
#     forest_data.to_csv('forest_data.csv', index=False, header=False, encoding='utf-8-sig')
#     print("数据已保存到forest_data.csv")
# else:
#     print("未能获取数据")
