"""
使用面向过程思想对爬虫代码进行封装
V1.0：实现一个页面的元素抓取
一共三个设计方法
"""
import requests
from lxml import etree


# ************************************************
# 获取页面信息
def get_page():
    """Fetch the target page and return its HTML body as text.

    Returns:
        str: the decoded page content.

    Raises:
        requests.HTTPError: if the server answers with a non-2xx status.
        requests.RequestException: on connection failure or timeout.
    """
    url = 'http://42.192.62.88:30166/'
    # A timeout keeps the script from hanging forever on a dead host.
    response = requests.get(url, timeout=10)
    # Fail fast on HTTP errors instead of parsing an error page downstream.
    response.raise_for_status()
    # apparent_encoding sniffs the real charset from the body bytes;
    # assigning it back fixes mojibake when the server header is wrong
    # or missing (the original computed it but never applied it).
    response.encoding = response.apparent_encoding
    return response.text


# ************************************************
# 提取页面元素
def get_element(content):
    """Extract the target paragraph texts from the page HTML.

    Walks the fixed xpath slots div[2]..div[8] of the page layout and
    concatenates each paragraph's text (whitespace stripped) into one
    newline-separated string, each line prefixed with its slot index.

    Args:
        content (str): raw HTML of the page.

    Returns:
        str: the accumulated "index + text\\n" lines; slots the page does
        not have are skipped instead of crashing.
    """
    tmp = ""
    # Parse the HTML into a DOM tree for xpath queries.
    doc = etree.HTML(content)

    for i in range(2, 9):
        # xpath() returns a list; it is empty when the slot is absent.
        hits = doc.xpath(f'/html/body/div[6]/div[3]/div/div[{i}]/p/text()')
        if not hits:
            # Guard against IndexError on pages with fewer blocks.
            continue
        # split()/join collapses all whitespace (spaces, NBSP, newlines).
        ele = ''.join(hits[0].split())
        tmp = tmp + str(i) + ele + '\n'
    return tmp


# ************************************************
# 保存抓取内容
def save_element(con):
    """Persist the scraped content to ``spider.txt`` (overwriting it).

    Args:
        con (str): text to write; a trailing newline is appended.
    """
    # The context manager guarantees the handle is closed even if the
    # write raises; an explicit UTF-8 encoding avoids platform-dependent
    # default codecs that cannot represent the scraped (Chinese) text.
    with open('spider.txt', 'w', encoding='utf-8') as file:
        file.write(con + '\n')


if __name__ == '__main__':
    # Pipeline: fetch the page -> extract the elements -> persist them.
    page_html = get_page()
    extracted = get_element(page_html)
    save_element(extracted)
