# ********************************************
# Spider code wrapped in a procedural style
# V2.0: scrape elements from multiple pages

# ********************************************
import requests
from lxml import etree


def set_URL(num):
    """Return the URL of the page to crawl.

    Args:
        num: page number; 1 selects the home page, any other value the
            news page (the "view more" link at the bottom of the home page).

    Returns:
        The URL string for the requested page.
    """
    base = "http://42.192.62.88:30166/"
    # Page 1 is the site root; every other page is the news listing.
    return base if num == 1 else base + "news.html"


# *************Fetch the page**********************
def get_page(url):
    """Fetch a page and return its decoded HTML text.

    Args:
        url: the URL to request.

    Returns:
        The response body as text.

    Raises:
        requests.RequestException: on connection failure, timeout, or an
            HTTP error status (4xx/5xx).
    """
    # A timeout keeps the crawler from hanging forever on a dead host.
    response = requests.get(url, timeout=10)
    # Fail fast on HTTP errors instead of silently parsing an error page.
    response.raise_for_status()
    return response.text


# ****************Extract page elements****************************
def get_element(num, content):
    """Extract headline texts from a page and format them as one string.

    Args:
        num: page number; 1 selects the home-page list xpath, any other
            value the news-page list xpath.
        content: raw HTML of the page.

    Returns:
        A string with one numbered, whitespace-collapsed headline per
        list item, each followed by a blank line.
    """
    # Parse the raw HTML into a DOM tree.
    doc = etree.HTML(content)
    if num == 1:
        ele_xpath = '/html/body/div[4]/div[2]/ul[1]/li['
    else:
        ele_xpath = '/html/body/div[3]/div[2]/div[2]/ul/li['
    parts = []
    # Pull the first 9 list items one by one via absolute xpath.
    for i in range(1, 10):
        matches = doc.xpath(ele_xpath + str(i) + '] / a/text()')
        # Skip missing items instead of raising IndexError on an empty
        # xpath result (the original crashed when a page had < 9 items).
        if not matches:
            continue
        # Collapse all internal whitespace in the headline text.
        ele = ''.join(matches[0].split())
        parts.append(str(i) + ele + "\n" + "\n")
    # join() avoids the quadratic string concatenation of the original.
    listcontent = "".join(parts)
    print(listcontent)
    return listcontent


# ****************Save the scraped content****************************
def save_element(listcontent):
    """Append the scraped text to spider.txt.

    Args:
        listcontent: the formatted headline string to persist.
    """
    # Context manager guarantees the file is closed even if the write
    # raises; explicit UTF-8 avoids platform-default-encoding failures
    # when writing non-ASCII (Chinese) headlines.
    with open("spider.txt", "a", encoding="utf-8") as file:
        file.write(listcontent + "\n")

if __name__ == '__main__':
    # Crawl both pages: page 1 is the home page, page 2 the news page.
    for page in range(1, 3):
        page_url = set_URL(page)
        # Announce which page is being fetched.
        print("第", page)
        page_html = get_page(page_url)
        headlines = get_element(page, page_html)
        save_element(headlines)
