# import requests
# from bs4 import BeautifulSoup
# from urllib.parse import urljoin
# import pandas as pd
# import json
# import time
#
# # 用于存储数据的列表
# data = []
# for i in range(4, 0, -1):
#     url = f"https://www.ahhhhfs.com/page/{i}/"
#     headers = {
#         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36"
#     }
#     print(url)
#     # 发送请求, 获取页面源代码
#     resp = requests.get(url, headers=headers)
#     time.sleep(2)
#
#     resp.encoding = 'utf-8'  # 设置一下字符集
#     # 1. 创建BeautifulSoup的对象
#     main_soup = BeautifulSoup(resp.text, "html.parser")
#     div_list = main_soup.find("div", attrs={"class": "posts-warp row row-cols-1 row-cols-md-2 g-2 g-md-3 g-lg-4"}).find_all("div", attrs={"class": "entry-media ratio ratio-3x2 col-auto"})
#     # 3. 循环出每一个超链接
#     for h in div_list:
#         a_list=h.find_all("a")
#         for a in a_list:
#             href = a.get("href")
#             title = a.get("title")
#             data_bg = a.get("data-bg")
#             # 下载封面
#             img_resp = requests.get(data_bg, headers=headers)
#             time.sleep(2)
#             title=title.replace("/","-")
#             print(title)
#             titleFileName = title+".jpg"
#             picHref=[]
#             infoFileName = title+"1.jpg"
#             with open("D:\\mycode\\pachong\\"+titleFileName, mode="wb") as f:
#                 f.write(img_resp.content)
#             # 保存详情
#             info = requests.get(href, headers=headers)
#             time.sleep(2)
#             info.encoding = 'utf-8'  # 设置一下字符集
#             # 1. 创建BeautifulSoup的对象
#             main_soup = BeautifulSoup(info.text, "html.parser")
#             article_element=main_soup.find("article")
#             info_a_list=article_element.find_all("a")
#             infoHref=[]
#             for a in info_a_list:
#                 href = a.get("href")
#                 if "ahhhhfs" not in href and "http" in href:
#                     infoHref.append(href)
#                 if href.endswith(".jpg"):
#                     #下载详情图片
#                     info_img_resp = requests.get(href, headers=headers)
#                     time.sleep(2)
#                     with open("D:\\mycode\\pachong\\"+infoFileName, mode="wb") as f:
#                         f.write(info_img_resp.content)
#             info_p_list=article_element.find_all("p")
#             infoText=""
#             for p in info_p_list:
#                 text=p.text
#                 if '👍' not in text:
#                     if '下载地址' not in text:
#                         if 'https' not in text:
#                             pText= text.replace("A姐","小编")
#                             infoText=infoText+pText
#             data.append([title, infoText, json.dumps(infoHref),titleFileName])
#
#
# # 创建 DataFrame
# df = pd.DataFrame(data, columns=['Title', 'content', 'resource_url','cover_url'])
#
# # 写入到 Excel 文件
# file_path = 'D:\\mycode\\pachong\\output.xlsx'
# df.to_excel(file_path, index=False)
#
#
