import requests
from bs4 import BeautifulSoup
import json
import pandas as pd

# Default HTTP headers attached to every request so the site treats the
# scraper like a regular desktop browser.
headers = {
    # Desktop Chrome user-agent string.
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
    # NOTE(review): hard-coded analytics cookie copied from a browser session;
    # presumably it will expire — confirm whether the site actually requires it.
    "Cookie": "Hm_lvt_f777bb53084c419dba5c6829b6cb970d=1677633850,1678155617,1678325752,1678678341; Hm_lpvt_f777bb53084c419dba5c6829b6cb970d=1678683977",
}

# Fetch the HTML of one listing page of the blog.
def craw_page(page_index):
    """Download listing page *page_index* from www.subingwen.cn.

    Args:
        page_index: 1-based page number. Page 1 lives at the site root;
            later pages live under ``/page/<n>``.

    Returns:
        The decoded HTML text of the page.

    Raises:
        requests.HTTPError: if the server responds with an error status
            (subclass of ``Exception``, so existing callers that caught the
            old bare ``Exception`` still work).
    """
    if page_index == 1:
        # Page 1 has no /page/ suffix.
        url = "https://www.subingwen.cn/"
    else:
        url = f"https://www.subingwen.cn/page/{page_index}"

    resp = requests.get(url, headers=headers)
    print(resp.status_code)
    # The site serves UTF-8; set it explicitly so resp.text decodes correctly.
    resp.encoding = "utf-8"
    print(resp.encoding)
    # Replaces the original bare `raise Exception` with a descriptive,
    # typed error that includes the status code and URL.
    resp.raise_for_status()
    return resp.text


def _find_text(article, tag, **kwargs):
    """Return the text of the first matching child tag, or "" if absent.

    Guards against malformed articles: ``find()`` returns ``None`` when the
    element is missing, and calling ``.get_text()`` on it would raise
    ``AttributeError`` and abort the whole crawl.
    """
    node = article.find(tag, **kwargs)
    return node.get_text() if node is not None else ""


# Parse one crawled listing page.
def parser_html(html):
    """Extract per-article metadata from a listing page.

    Args:
        html: raw HTML text of a listing page.

    Returns:
        A list of ``[title, date, category, tags]`` rows, one per article
        found under ``div.recent-post-info``.
    """
    soup = BeautifulSoup(html, 'html.parser')
    articles = soup.find_all("div", class_="recent-post-info")
    datas = []
    for article in articles:
        # Article title
        article_title = _find_text(article, "a", class_="article-title")
        # Publication date
        article_time = _find_text(article, "time")
        # Category
        article_meta_categories = _find_text(article, "a", class_="article-meta__categories")
        # Tags
        article_meta_tags = _find_text(article, "a", class_="article-meta__tags")

        datas.append([article_title, article_time, article_meta_categories, article_meta_tags])
    return datas



# Crawl every listing page (1..14), parse each one, and accumulate the rows.
all_datas = []
for page_index in range(1, 15):
    page_html = craw_page(page_index)
    all_datas.extend(parser_html(page_html))


# Echo every scraped row to the console.
for data in all_datas:
    print(data)


# Export the scraped rows to an Excel workbook.
df = pd.DataFrame(
    all_datas,
    columns=["article_title", "article_time", "article_meta_categories", "article_meta_tags"],
)
df.to_excel("爱编程的大丙.xlsx", index=False)