'''
分析爬取的数据
数据源地址：https://www.lmonkey.com/t
数据内容：文章标题，文章的链接，作者，发布时间
工具：
    python, requests, bs4
'''

import requests,json
from bs4 import BeautifulSoup

# 1. Define the request URL and headers (UA spoofed so the site serves the normal page)
url = 'https://www.lmonkey.com/t'
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}

# 2. Send the request
res = requests.get(url=url, headers=headers)

# Initialize the result list BEFORE the status check: the write step at the
# bottom runs unconditionally, so without this a non-200 response raised
# NameError on `list_1` (original bug). On failure we now write an empty list.
list_1 = []

# 3. Check the request succeeded and parse the page source
if res.status_code == 200:
    soup = BeautifulSoup(res.text, 'lxml')

    # 4. Extract data: one dict per article card on the listing page
    for i in soup.find_all('div', class_="list-group-item list-group-item-action p-06"):
        r = i.find('div', class_="topic_title mb-0 lh-180")
        if r:  # skip cards that have no title block
            dict_1 = {
                "title": r.text.split()[0],   # first whitespace-separated token of the title
                'author': i.strong.a.text,    # author name inside <strong><a>
                'url': i.a['href'],           # first link in the card is the article link
                'time': i.span['title'],      # publish time kept in the span's title attribute
            }
            list_1.append(dict_1)
    print(list_1)

# 5. Write the data.
# ensure_ascii=False keeps Chinese titles human-readable in the JSON file;
# an explicit utf-8 encoding makes that safe regardless of platform default.
with open('./9.0.json', "w", encoding="utf-8") as book:
    json.dump(list_1, book, ensure_ascii=False)
