"""
使用面向对象思想对爬虫代码进行封装
V2.0：实现多个页面的元素抓取
一共三个设计方法
"""
import requests
from lxml import etree


class spider_V2:
    """Scrape paragraph texts from a fixed page and append them to spider.txt.

    Intended call order: get_page() -> get_element() -> save_element().
    Attributes set along the way:
        url     -- target page, chosen in __init__
        content -- decoded HTML of the fetched page
        tmp     -- newline-joined scraped text, ready to be written out
    """

    def __init__(self, i):
        # The page index is passed in from main.
        # NOTE(review): both branches currently point at the same URL —
        # presumably the else-branch was meant to be the "next page";
        # confirm the real pagination URL before relying on i != 1.
        if i == 1:
            self.url = 'http://42.192.62.88:30166/'
        else:
            self.url = 'http://42.192.62.88:30166/'

    # ************************************************
    def get_page(self):
        """Fetch self.url and store the decoded HTML in self.content."""
        response = requests.get(self.url)
        # Detect the encoding from the response body and actually apply it
        # (the original computed it but left response.encoding at requests'
        # ISO-8859-1 fallback, which garbles Chinese text).
        code = response.apparent_encoding
        print(code)
        response.encoding = code
        self.content = response.text

    # ************************************************
    def get_element(self):
        """Extract paragraph texts from self.content into self.tmp.

        Bug fix: the original assigned ``self.tmp = tmp + ...`` inside the
        loop while the local ``tmp`` stayed empty, so each iteration
        overwrote the previous one and only the last element survived.
        All seven elements are now accumulated.
        """
        # Parse the fetched HTML into a DOM tree.
        doc = etree.HTML(self.content)

        parts = []
        # Locate each target element by its absolute XPath position.
        for i in range(2, 9):
            ele = doc.xpath(f'/html/body/div[6]/div[3]/div/div[{i}]/p/text()')[0]
            # Remove all embedded whitespace / unprintable separators.
            ele = ''.join(ele.split())
            parts.append(str(i) + ele)
        self.tmp = '\n'.join(parts) + '\n'
        print(self.tmp)

    # ************************************************
    def save_element(self):
        """Append the scraped text in self.tmp to spider.txt."""
        # ``with`` guarantees the file is closed even if write() raises;
        # explicit UTF-8 avoids platform-dependent default encodings.
        with open('spider.txt', 'a', encoding='utf-8') as file:
            file.write(self.tmp + '\n')


if __name__ == '__main__':
    # Drive the scraper over two page indices: fetch, extract, persist.
    for page in (1, 2):
        crawler = spider_V2(page)
        print("第", page)
        crawler.get_page()
        crawler.get_element()
        crawler.save_element()
