import requests
from bs4 import BeautifulSoup
import json
import pandas as pd

# Endpoint that returns one page of the cnblogs.com homepage post list
# as an HTML fragment (the site loads its list via this XHR POST).
url = "https://www.cnblogs.com/AggSite/AggSitePostList"

# Request headers copied from a real browser XHR so the server accepts us.
# NOTE: "content-length" was deliberately removed — requests computes it from
# the actual body, and the previously hard-coded value ("140") becomes wrong
# as soon as the JSON payload length changes (e.g. PageIndex >= 10).
# "accept-encoding" was also removed: requests negotiates encodings it can
# actually decode; forcing "br" can yield an undecodable brotli response.
headers = {
    "accept": "text/plain, */*; q=0.01",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "content-type": "application/json; charset=UTF-8",
    # NOTE(review): this cookie carries session/antiforgery tokens captured
    # from one browser session — it will expire and may need refreshing.
    "cookie": "__gads=ID=2e28f31631b3b730:T=1669356542:S=ALNI_MYKaaZM1wdQy-uXiND_R6SLPN4-Og; clostip=0; Hm_lvt_866c9be12d4a814454792b1fd0fed295=1678155696,1678255625,1678325846,1678674319; _gid=GA1.2.1467698097.1678674320; __gpi=UID=00000b832f492b50:T=1669356542:RT=1678674226:S=ALNI_MZIzvEZvoc_jjyMqlhaUhvVF08z-w; .AspNetCore.Antiforgery.b8-pDmTq1XM=CfDJ8M-opqJn5c1MsCC_BxLIULllPepgoULYRE20XxLHc8AmtDY6Ar2KB99fU3m-8XsmJbZxfVR0UzJ1dMoHC9xYf_8WMR7o29see5ppxk1pvBgGQZXf0SD6raiJUebpYz4GYqcgJR4XlDg12idEeKcPD5w; .Cnblogs.AspNetCore.Cookies=CfDJ8M-opqJn5c1MsCC_BxLIULkt1k5MrCuqysrywsacq8XQO0op13NAmvgMSSeBSRvfXozI_mvaAi3qocAT0k2jlMAa2KF2aekQVxUxiXxsT8HKeHSHoZtquxqb3aOmGzuGvKOZ7yMDhBARXiZUWZMgLt1q848CEgPheGZQuQAxUu8tHvkU74frDs3j533p62BjgyKfnrU9GMYo7ZytgcBswlFclalS3nGyabn9aqNtoY06QoDGJTEcw1_BTk2FsNon-VbYAVe2q35qgHxbW37pa4tZ0z-b1mskwQ_laB6J3RjZo6kWDc01BVf8JS_bU1lG1LOpjaHn74E5TR73-aFt3TEbZHNUQ9nmWW2yUDt_6EaD3qJApPTT8fGNnbK3Ft3Ztw_qS0N5PRHaKsU0tD47OcyRImNfrJnab7FzQlFr5F_CVttr-3guIoaCLgqJqvazmWmIRkCqpovklbotNNKE9fH5e0utxZW79sh521za0acJEXso97YOai-tue_5MWPdRfguQtjZD9_N0nXXlx_rQp1SdN_SuS1KvYip_145Cp5Qw7CQPGiox-NMyGQhuSmBgw; .CNBlogsCookie=C9AB2DF943E5AE9B2AFDBEA959E73004F52FB0E8CEF8393CD50A68429A0FB7997F455A4BD59A5CACBDBF67E360B5CA56B250FBCBB2C722600D3A30CF129E43B19ECAC7EEF6F946584BC8FFE9121C0D20C60FE4C4; _ga_3Q0DVSGN10=GS1.1.1678674332.3.1.1678674831.30.0.0; _ga=GA1.2.1004102395.1669094219; _gat_gtag_UA_476124_1=1; _gat=1; Hm_lpvt_866c9be12d4a814454792b1fd0fed295=1678674837",
    "origin": "https://www.cnblogs.com",
    "pragma": "no-cache",
    "referer": "https://www.cnblogs.com/",
    "sec-ch-ua": '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
    "x-requested-with": "XMLHttpRequest",
}

# Fetch the HTML fragment for one page of the aggregated post list.
def craw_page(page_index, timeout=10):
    """Fetch page *page_index* of the cnblogs homepage post list.

    Args:
        page_index: Zero-based page number to request.
        timeout: Seconds to wait for the server before aborting
            (new backward-compatible keyword; previously the request
            could hang forever).

    Returns:
        The response body (an HTML fragment of <article> items) as a string.

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status.
        requests.Timeout: If the server does not respond within *timeout*.
    """
    data = {
        "CategoryType": "SiteHome",
        "ParentCategoryId": 0,
        "CategoryId": 808,
        "PageIndex": page_index,
        "TotalPostCount": 4000,
        "ItemListActionName": "AggSitePostList",
    }

    # json= serializes the payload and sets Content-Type/Content-Length
    # correctly, instead of hand-rolling json.dumps() positionally.
    resp = requests.post(url, json=data, headers=headers, timeout=timeout)
    print(resp.status_code)
    # Fail loudly on an error status instead of silently handing an error
    # page to the parser (which would then yield zero articles).
    resp.raise_for_status()
    return resp.text


# Parse one fetched HTML fragment into a list of article rows.
def parser_html(html):
    """Extract one row per <article class="post-item"> from *html*.

    Args:
        html: HTML fragment containing the post-list markup.

    Returns:
        A list of [title, href, author, icon_digg, icon_comment, icon_views]
        rows. The three counters default to 0 (int) when the corresponding
        badge is absent; when present they are the badge's text as scraped.
    """
    soup = BeautifulSoup(html, 'html.parser')
    articles = soup.find_all("article", class_="post-item")
    datas = []
    for article in articles:
        # Article title + link. Skip malformed entries instead of
        # crashing with AttributeError on a missing anchor.
        link = article.find("a", class_="post-item-title")
        if link is None:
            continue
        title = link.get_text()
        href = link.get("href", "")

        # Article author; an entry without an author link yields "".
        author_link = article.find("a", class_="post-item-author")
        author = author_link.get_text() if author_link is not None else ""

        # Digg / comment / view counters: each lives in an <a> whose markup
        # contains the matching icon class, with the number in a <span>.
        icon_digg = 0
        icon_comment = 0
        icon_views = 0
        for a in article.find_all('a'):
            span = a.find("span")
            if span is None:
                # Anchor without a counter span (e.g. the title link itself).
                continue
            markup = str(a)
            if "icon_digg" in markup:
                icon_digg = span.get_text()
            if "icon_comment" in markup:
                icon_comment = span.get_text()
            if "icon_views" in markup:
                icon_views = span.get_text()
        datas.append([title, href, author, icon_digg, icon_comment, icon_views])
    return datas


# Crawl several pages of the post list and export the rows to Excel.
def main(page_count=1):
    """Scrape *page_count* pages (default 1, matching the original
    ``range(0, 1)`` loop), parse them, and write the combined rows to
    an Excel workbook.

    Args:
        page_count: Number of pages to fetch, starting at page 0.
    """
    all_datas = []
    for page in range(page_count):
        print(f"抓取page:{page}")
        html = craw_page(page)
        datas = parser_html(html)
        all_datas.extend(datas)

    df = pd.DataFrame(
        all_datas,
        columns=["title", "href", "author", "icon_digg", "icon_comment", "icon_views"],
    )
    # index=False: don't write the DataFrame's row index as an extra column.
    df.to_excel("博客园文章信息.xlsx", index=False)


# Entry-point guard so importing this module does not trigger the crawl.
if __name__ == "__main__":
    main()