import requests
from bs4 import BeautifulSoup
from common import putFileContent,p
from datetime import date

#Full-fidelity request with real browser cookies and headers (fetches live data)
#Collects each article's title + link + time + views + diggs + comments, concatenated and written as HTML for easy reading
#Two-level loop: outer iterates over articles, inner reads each article's digg/view/comment counts, giving the data a basis for analysis
#The crawl prints its progress so the scraping process can be followed at a glance
#Each article is numbered sequentially for easy reference
#Output files are named by collection date so logs from different days stay separate

# Cookies captured from a real browser session on cnblogs.com so requests look
# like normal browser traffic. NOTE(review): these are analytics cookies
# (_ga / Hm_* / HMACCOUNT) tied to one browser and will go stale — refresh
# them from DevTools if the site starts rejecting or throttling requests.
cookies = {
    '_ga': 'GA1.1.1575449911.1719629830',
    'Hm_lvt_866c9be12d4a814454792b1fd0fed295': '1719629830,1720194877,1720798002,1721230608',
    'Hm_lpvt_866c9be12d4a814454792b1fd0fed295': '1721230608',
    'HMACCOUNT': '5D101472A49EB553',
    '_ga_M95P3TTWJZ': 'GS1.1.1721230607.8.0.1721230607.0.0.0',
}

# Request headers copied verbatim from a Chrome 126 / Windows browser session
# so the scraper's traffic is indistinguishable from a normal page visit
# (user-agent, sec-ch-ua client hints, sec-fetch-* navigation metadata).
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'no-cache',
    'pragma': 'no-cache',
    'priority': 'u=0, i',
    'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'none',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
}
def caiji(page, num):
    """Scrape one index page of cnblogs.com and append the articles to today's HTML log.

    Args:
        page: 1-based index-page number to fetch.
        num: running article counter; each article on the page is assigned
            the next sequential number.

    Returns:
        The updated counter after numbering this page's articles, or None on
        any error (network failure, parse failure, page past the end), which
        the caller uses as the signal to stop crawling.
    """
    try:
        page_detail = ''
        response = requests.get(
            'https://www.cnblogs.com/sitehome/p/{}'.format(page),
            cookies=cookies,
            headers=headers,
            timeout=10,  # don't hang forever on a dead connection
        )
        # Parse the decoded text directly; re-encoding to bytes was redundant.
        soup = BeautifulSoup(response.text, 'html.parser')
        articles = soup.find_all("article", {"class": 'post-item'})
        for arti in articles:
            # Title + link: look the anchor up once and reuse it.
            title_link = arti.find("a", {"class": "post-item-title"})
            title = title_link.text.strip()
            href = title_link.get("href").strip()
            # Publish time. Fixed: the attrs argument was the set
            # {"class", "post-meta-item"} (comma typo) instead of a dict.
            ptime = arti.find("span", {"class": "post-meta-item"}).text.strip()
            # Comment / view / digg counters, joined with "|".
            cmts = arti.find_all('a', {"class": "post-meta-item btn"})
            comments_text = "|".join([comment.text.strip() for comment in cmts])

            # One numbered HTML row per article, viewable in a browser.
            # Fixed: the anchor now closes with </a> (was a second <a>).
            article_detail = ('<div>' + str(num) + '.<a href="' + href + '">'
                              + title + '|' + comments_text + '|' + ptime
                              + '</a></div>')
            num = num + 1
            page_detail = page_detail + "\n" + article_detail
        # One log file per collection date.
        fileName = str(date.today()) + 'cnblog.html'
        putFileContent(page_detail, fileName)
        return num
    except Exception as e:
        # Broad catch is deliberate: any failure (including "page out of
        # range") is reported and converted to the None stop-signal.
        print(f"Error: {e}")
        return None


# Error-tolerant driver: past the last page the site returns empty/error
# content, caiji() returns None, and the crawl stops — so any page count
# is safe to configure here.
num = 1
for page in range(1, 101):  # renamed from `_`: the variable IS used (page number)
    num = caiji(page, num)
    if num is None:
        p("采集结束")
        break
    # Per-page progress so the crawl can be watched live.
    print(f"已经采集第 {page} 页")