'''
Scrape the text content of web pages and save it to a file.
'''
# 1. Send a request to the article-list page
# 2. Request the target URL address
import requests  # 数据请求模块
from bs4 import BeautifulSoup

# Request headers (dict). A browser-like User-Agent keeps the target site
# from rejecting the request as an obvious bot.
headers = {
    # Fixed from 'user-Agent' to the canonical 'User-Agent' casing.
    # requests sends header names case-insensitively, so behavior is unchanged.
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36'
}
# Request methods: most commonly GET and POST


# <Response [200]> is a response object; access its data via attributes
"""
<a>hello world</a>
"""


# CSS selector reminder:
# #  id          -> #xxx
# .  class='xxx' -> .xxx

def get_content(url):
    """Fetch an article page and append its paragraph text to result.txt.

    Args:
        url: Address of the article page to download.

    Side effects:
        Appends the extracted text (one paragraph per line, followed by a
        blank line) to 'result.txt' in the current working directory.
        Does nothing if the expected content <div> is not found.
    """
    resp = requests.get(url=url, headers=headers)
    soup = BeautifulSoup(resp.text, 'html.parser')
    div = soup.find(name='div', attrs={'class': 'read-content'})
    if div is None:
        # Page layout changed or the request was blocked — nothing to save.
        return

    # p.string is None when a <p> contains nested tags, which made the
    # original `p.string + '\n'` raise TypeError; get_text() always
    # returns a str. join avoids the quadratic `+=` string build.
    res = '\n'.join(p.get_text() for p in div.find_all(name='p')) + '\n'

    # mode 'a' (not 'a+'): we only append, never read the file back.
    with open('result.txt', encoding='utf-8', mode='a') as f:
        f.write(res)
        f.write('\n\n')


if __name__ == '__main__':
    # Entry point: fetch a forum thread page and print each post's text.
    # (Removed the unused `prefix` and `links` variables — dead code.)
    url = 'http://bbs.tianya.cn/post-no05-525925-1.shtml'
    home_page = requests.get(url=url, headers=headers)
    home_soup = BeautifulSoup(home_page.text, 'html.parser')

    # Guard against a missing container div (layout change / blocked
    # request), which previously raised AttributeError on .find_all.
    main_div = home_soup.find(name='div', attrs={'class': 'atl-main'})
    items = main_div.find_all(attrs={'class': 'atl-item'}) if main_div else []

    for item in items:
        content = item.find(attrs={'class': 'bbs-content'})
        # .string is None when the tag has nested children (or the tag
        # itself is missing); fall through to the placeholder print.
        msg = content.string if content is not None else None
        if msg is not None:
            print(msg.strip())
        else:
            print('kong')
