import random
import time

import requests
from lxml import etree

# Browser-like request headers so the site serves normal pages instead of
# throttling/blocking the default python-requests User-Agent.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
    ),
}


class PinganSpider:
    """Crawler for pinganwj.com disease-list pages.

    Two-phase workflow: ``paginator_spider`` walks the paginated listing and
    appends detail-page URLs to ``urls.txt``; ``downloader`` then fetches each
    collected URL and saves the HTML under ``save_path``. Failures in either
    phase are appended to ``spider.log``.
    """

    def __init__(self, start_url, headers, save_path):
        # start_url: listing-URL prefix; the 1-based page number is appended.
        # headers:   HTTP headers sent with every request (User-Agent etc.).
        # save_path: directory prefix where downloaded pages are written.
        self.headers = headers
        self.start_url = start_url
        self.save_path = save_path

    def paginator_spider(self, pages=680):
        """Crawl listing pages 1..pages, appending found hrefs to urls.txt.

        Pages that fail to download or yield no links are recorded in
        spider.log. ``pages`` defaults to the original hard-coded 680.
        """
        for i in range(1, pages + 1):
            url = self.start_url + str(i)
            print(f'正在爬取第{i}个页面')
            text = self.get_html(url)
            # Random 0-1 s pause to avoid hammering the server.
            time.sleep(random.random())
            # get_html returns None on failure; don't feed None to lxml.
            urls = self.parse_page(text) if text is not None else []
            if urls:
                with open('urls.txt', mode='a', encoding='utf-8') as f:
                    f.writelines(u + '\n' for u in urls)
            else:
                with open('spider.log', mode='a', encoding='utf-8') as log:
                    # Trailing newline keeps successive log entries separate.
                    log.write(f'第{i}个页面爬取失败\n')

    def get_html(self, url):
        """GET ``url`` and return the decoded body, or None on any failure.

        Returns None (rather than a sentinel string, which callers could not
        distinguish from real page content) when the request errors out,
        times out, or the server answers with a non-2xx status.
        """
        try:
            r = requests.get(url, timeout=30, headers=self.headers)
            r.raise_for_status()
            # Use the encoding detected from the body; the site may not
            # declare charset in its response headers.
            r.encoding = r.apparent_encoding
            return r.text
        except requests.RequestException:
            return None

    def parse_page(self, text):
        """Return the detail-page hrefs found in a listing page's HTML."""
        tree = etree.HTML(text)
        return tree.xpath('//div[@class="news-list"]/a/@href')

    def downloader(self):
        """Download every URL listed in urls.txt to save_path as <N>.html.

        Failed fetches are logged to spider.log and skipped instead of
        writing a bogus file.
        """
        with open('urls.txt', mode='r', encoding='utf-8') as f:
            url_queue = f.read().splitlines()
        for i, path in enumerate(url_queue, start=1):
            url = 'https://www.pinganwj.com' + path
            text = self.get_html(url)
            time.sleep(random.random())
            if text is None:
                with open('spider.log', mode='a', encoding='utf-8') as log:
                    log.write(f'第{i}个页面爬取失败\n')
                continue
            with open(f'{self.save_path}{i}.html', mode='w', encoding='utf-8') as f:
                f.write(text)
            print(f'第{i}个页面保存成功')


if __name__ == '__main__':
    # Phase 1 (paginator_spider) collects URLs into urls.txt; once that has
    # run, phase 2 (downloader) saves each collected page to disk.
    entry_url = 'https://www.pinganwj.com/diseaseList/pg'
    output_dir = 'D:/PycharmProjects/pingan_data/'
    crawler = PinganSpider(entry_url, headers, output_dir)
    # crawler.paginator_spider()
    crawler.downloader()
