import requests
from requests.exceptions import ReadTimeout,HTTPError,RequestException
import re
import json
import time
from bs4 import BeautifulSoup
import pymysql
import schedule
import time
import datetime
import threading

CNBLOG_URL="https://www.cnblogs.com/mvc/AggSite/PostList.aspx"

#Crawl one page of post data
def CrawlData(pageindex):
    """Fetch one page of the cnblogs aggregated post list and parse it.

    Args:
        pageindex: 1-based page number to request from the PostList endpoint.

    Returns:
        A list of dicts, one per post, with keys: blogdiggnum, blogtitle,
        blogurl, blogauthor, blogtime, blogcommentnum, blogviewnum.
        Returns an empty (or partial) list on request failure.
    """
    resultdata=[]

    headers={"content-type":"application/json; charset=UTF-8"}

    databodys={
        "CategoryId": 808,
        "CategoryType": "SiteHome",
        "ItemListActionName": "PostList",
        "PageIndex":pageindex,
        "ParentCategoryId":0,
         "TotalPostCount":4000
    }

    try:
        # timeout added so a hung connection cannot stall the whole crawl;
        # verify=False kept from the original (site cert issues assumed)
        res=requests.post(CNBLOG_URL,data=json.dumps(databodys),headers=headers,verify=False,timeout=30)
        if res.status_code==200:
            html=BeautifulSoup(res.text,"html.parser")
            #all blog posts on the page
            postItems=html.select(".post_item")
            for item in postItems:
                #digg (recommendation) count
                blogdiggnum=int(item.select(".diggnum")[0].get_text())
                #title link: its text is the title, its href the url
                #(hoisted so the selector runs once instead of twice)
                titleLink=item.select(".post_item_body h3 a")[0]
                blogTitle=titleLink.get_text()
                blogUrl=titleLink.attrs["href"]
                #blog author
                blogAuthor=item.select(".post_item_foot a")[0].get_text()
                #publish time, sliced out of the footer text node;
                #seconds are not in the markup, so ":00" is appended
                blogTime=item.select(".post_item_foot")[0].contents[2][11:27]+":00"
                #comment count (raw strings fix the invalid "\d" escape)
                blogCommentNum=int(re.findall(r"\d+", item.select(".post_item_foot .article_comment a")[0].get_text())[0])
                #view count
                blogViewNum=int(re.findall(r"\d+", item.select(".post_item_foot .article_view a")[0].get_text())[0])

                resultDic={
                    "blogdiggnum":blogdiggnum,
                    "blogtitle":blogTitle,
                    "blogurl":blogUrl,
                    "blogauthor":blogAuthor,
                    "blogtime":blogTime,
                    "blogcommentnum":blogCommentNum,
                    "blogviewnum":blogViewNum
                }
                resultdata.append(resultDic)
        else:
            print("fail")

    except ReadTimeout:
        print("timeout")
    except HTTPError:
        print("HttpError")
    except RequestException:
        print("httpError") 

    return resultdata

#Insert one record into the database
def InsertData(data):
    """Persist one crawled blog record into tb_bloginfo.

    Args:
        data: dict with keys blogdiggnum, blogtitle, blogurl, blogauthor,
              blogtime (a '%Y-%m-%d %H:%M' style string), blogcommentnum,
              blogviewnum — as produced by CrawlData.

    On any database error the transaction is rolled back and a message is
    printed; the connection is always closed.
    """
    db= pymysql.connect(host="localhost",user="root",password="123456",db="cnblogdb",port=3306,charset="utf8")
    # Parameterized query: the old %-formatted SQL broke on titles/authors
    # containing quotes and was open to SQL injection. pymysql uses %s for
    # every placeholder regardless of type; literal percents in the
    # str_to_date format must stay doubled (%%Y) so they survive escaping.
    sqltext=("INSERT INTO tb_bloginfo(blogdiggnum,blogtitle,blogurl,blogauthor,"
             "blogtime,blogcommentnum,blogviewnum,AddTime) "
             "VALUES(%s,%s,%s,%s,str_to_date(%s,'%%Y-%%m-%%d %%H:%%i:%%s'),%s,%s,NOW());")
    params=(data["blogdiggnum"],data["blogtitle"],data["blogurl"],data["blogauthor"],
            data["blogtime"],data["blogcommentnum"],data["blogviewnum"])
    try:
        cur=db.cursor()
        cur.execute(sqltext,params)
        db.commit()
    except Exception:
        db.rollback()
        print("发生异常1")
    finally:
        # close even if commit/rollback raises
        db.close()


def GetData():
    """Crawl pages 1..200 of the post list and store every record.

    Progress is printed after each page.
    """
    TotalPage = 201
    for page_no in range(1, TotalPage):
        records = CrawlData(page_no)
        for record in records:
            # persist each parsed post
            InsertData(record)
        print("已完成：%s/%s" % (page_no, TotalPage-1))

def GetData_Task():
    """Run a full crawl on a background thread.

    Keeps the scheduler's polling loop responsive while the (long) crawl runs.
    """
    worker = threading.Thread(target=GetData)
    worker.start()

def run():
    """Register the daily crawl job and drive the scheduler forever."""
    # fire GetData_Task once per day at 22:00 (local time)
    daily_job = schedule.every(1).days
    daily_job.at("22:00").do(GetData_Task)
    # polling loop: one-second granularity is plenty for a daily job
    while True:
        schedule.run_pending()
        time.sleep(1)

if __name__ == '__main__':
    # Entry point: starts the scheduler loop (blocks forever).
    run()