import re
import requests
import pymysql
from lxml import etree
import time as ti

# Browser-like User-Agent so the site serves normal HTML instead of
# rejecting the request as a bot; shared by every page fetch below.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}
def get_infos(url1, figure):
    """Scrape Jianshu timeline pages starting at *url1* and store them in MySQL.

    For each page: extract article title, abstract, view count, like count and
    publish time, insert the rows into ``jianshu_dongtai``, then follow the
    "next page" link (last feed id on the page minus one) until none is found.

    Args:
        url1: URL of the first timeline page to fetch.
        figure: 1-based page counter, used for progress output and for
            building the next page's URL.

    Side effects:
        Inserts rows via the module-level ``cursor`` and commits on the
        module-level connection ``coon``; prints progress; sleeps 1s per page.
    """
    # Pagination is a loop rather than recursion so a long timeline cannot
    # blow the Python recursion limit; behavior is otherwise identical.
    while True:
        rqq = requests.get(url=url1, headers=headers)
        datas = etree.HTML(rqq.text)
        titles = datas.xpath('//a[@class="title"]/text()')        # article titles
        contents = datas.xpath('//p[@class="abstract"]/text()')   # article abstracts
        # View counts: strip whitespace and drop empty entries in one pass.
        # (The original removed items from the list while iterating over it,
        # which skips the element after each removal.)
        views = [v.strip() for v in datas.xpath('//div[@class="meta"]/a[1]/text()') if v.strip()]
        likes = datas.xpath('//li[@class="have-img"]/div[@class="content"]/div[@class="meta"]/span/text()')  # like counts
        # Publish times; raw string so the \+ escape is passed to the regex engine.
        times = re.findall(r'<span data-type=.*? data-datetime="(.*?)\+08:00">.*?</span>', rqq.text)
        for title, content, view1, like1, time1 in zip(titles, contents, views, likes, times):
            cursor.execute(
                "insert into jianshu_dongtai(title,content,view1,like1,time)"
                "values(%s,%s,%s,%s,%s)",
                (title, content, view1, like1, time1))
        coon.commit()  # commit this page's rows
        # The last feed id on the page, minus one, is the next page's max_id.
        feed_ids = re.findall('<.*?id="feed-(.*?)">', rqq.text)
        if not feed_ids or feed_ids[-1] == '':
            # No feed ids at all (original code raised IndexError here) or an
            # empty trailing id: we are done.
            return
        next_figure = int(feed_ids[-1]) - 1
        print('已经爬取了第{}页'.format(figure))
        figure += 1
        url1 = 'https://www.jianshu.com/users/9104ebf5e177/timeline?max_id={}&page={}'.format(next_figure, figure)
        ti.sleep(1)  # be polite to the server between page fetches

if __name__ == '__main__':
    # Entry point: crawl the user's timeline starting from page 1.
    start_page = 1
    start_url = 'https://www.jianshu.com/users/9104ebf5e177/timeline?_pjax=%23list-container'
    # NOTE: get_infos() reads `coon` and `cursor` from module scope, so these
    # two names must stay exactly as they are.
    coon = pymysql.connect(host='localhost', port=3306, user='root',
                           password='33570638', db='pythondata',
                           charset='utf8mb4')  # open the MySQL connection
    cursor = coon.cursor()  # cursor used by get_infos for the inserts
    get_infos(start_url, start_page)
