'''
- A new take on a web-crawler framework
'''
import urllib3
from lxml import etree

''' Crawler: page parsing '''


class Parser:
  '''Extract joke texts and pagination links from a qiushibaike HTML page.'''

  def __init__(self):
    pass

  def parse(self, html):
    '''Parse one page of HTML.

    Returns a tuple ``(urls, data)`` where *urls* are the absolute
    pagination links found on the page and *data* is the list of joke
    texts.
    '''
    e = etree.HTML(html)
    data = self.parse_info(e)
    urls = self.parse_url(e)
    return urls, data

  def parse_url(self, e):
    '''Return absolute URLs for every link in the pagination bar.'''
    base_url = 'https://www.qiushibaike.com%s'
    # Hrefs in the pagination list are site-relative; prepend the domain.
    return [base_url % url
            for url in e.xpath('//ul[@class="pagination"]/li/a/@href')]

  def parse_info(self, e):
    '''Return the plain-text content of every joke block on the page.'''
    # string(.) flattens each span's nested markup into plain text.
    return [s.xpath('string(.)')
            for s in e.xpath('//div[@class="content"]/span[1]')]


# Crawler: link (frontier) management
class URLManger(object):
  '''Track the crawl frontier: URLs waiting to be fetched vs. already fetched.

  (Name kept as-is — callers reference ``URLManger``.)
  '''

  def __init__(self):
    # Pending URLs, consumed LIFO by get_new_url().
    self.new_url = []
    # Already-visited URLs; a set gives O(1) duplicate checks.
    self.old_url = set()

  def add_new_url(self, url):
    '''Queue *url* unless it is falsy or has been seen before.'''
    # Debug print of the whole frontier removed — it spammed stdout on
    # every enqueue.
    if url and url not in self.new_url and url not in self.old_url:
      self.new_url.append(url)

  def add_new_urls(self, urls):
    '''Queue every URL in *urls*, skipping duplicates.'''
    for url in urls:
      self.add_new_url(url)

  def has_new_url(self):
    '''Return True while there are URLs left to crawl.'''
    return self.get_new_url_size() > 0

  def get_new_url_size(self):
    '''Return the number of URLs still waiting to be fetched.'''
    return len(self.new_url)

  def get_new_url(self):
    '''Pop the most recently added pending URL and mark it visited.'''
    url = self.new_url.pop()
    self.old_url.add(url)
    return url


''' Crawler: page downloading '''


class Downloader:
  '''Fetch pages over HTTP with a desktop-browser User-Agent.'''

  def __init__(self):
    # Build the connection pool once and reuse it across downloads,
    # instead of creating a fresh PoolManager on every request.
    self.http = urllib3.PoolManager()

  def download(self, url):
    '''GET *url* and return the body decoded as UTF-8, or None if empty.'''
    headers = {
      'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
    }
    response = self.http.request('GET', url, headers=headers)
    if response.data:
      return response.data.decode('utf-8')
    return None


''' Crawler: result persistence '''


class DataOutPut:
  '''Append scraped joke texts to a local results file.'''

  def __init__(self):
    pass

  def save(self, data):
    '''Append every string in *data* to duanzi.txt (UTF-8, no separators).'''
    with open('duanzi.txt', 'a', encoding='utf-8') as out:
      out.writelines(data)


''' Crawler scheduler '''


class Scheduler2:
  '''Wire the downloader, URL manager, parser and writer into one crawl loop.'''

  def __init__(self):
    print('-' * 30, 'init')
    self.downloader = Downloader()
    self.urlManager = URLManger()
    self.parser = Parser()
    self.data_server = DataOutPut()

  def run(self, url):
    '''Crawl starting from *url* until the frontier is exhausted.'''
    manager = self.urlManager
    manager.add_new_url(url)
    while manager.has_new_url():
      current = manager.get_new_url()
      print(current)
      page = self.downloader.download(current)
      next_urls, texts = self.parser.parse(page)
      print(texts)
      print('-' * 30)
      print(next_urls)
      self.data_server.save(texts)
      manager.add_new_urls(next_urls)


if __name__ == '__main__':
  # Kick off the crawl from the first page of the text section.
  start_url = 'https://www.qiushibaike.com/text/page/%d/' % 1
  scheduler = Scheduler2()
  scheduler.run(start_url)
