#!/usr/bin/env python
# -*- coding:utf-8 -*-

import urllib2
import re

class Spider:
    """Crawler for joke posts on neihan8.com (Python 2 / urllib2).

    Downloads listing pages one at a time, strips the HTML markup from
    each post, and appends the plain text to ``duanzi.txt``.
    """

    # Matches the <div> wrapping one post's text on a listing page.
    # Compiled once at class-definition time instead of on every request.
    _CONTENT_RE = re.compile(r'<div\sclass="f18 mb20">(.*?)</div>', re.S)

    # Browser-like User-Agent so the site serves the normal page instead
    # of rejecting the default urllib2 agent. Built once, not per request.
    _HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}

    def __init__(self):
        # Next listing page number to fetch (1-based; interpolated into the URL).
        self.page = 1
        # Main-loop flag; startWork() keeps running while this is True.
        self.switch = True

    def loadPage(self):
        """Download the current listing page and pass its posts to dealPage().

        Fix vs. original: the HTTP response is now closed after reading
        (the original leaked the connection), and the content regex is a
        precompiled class attribute.
        """
        print('正在下载数据...')
        url = 'http://www.neihan8.com/article/list_5_' + str(self.page) + '.html'
        request = urllib2.Request(url, headers=self._HEADERS)
        response = urllib2.urlopen(request)
        try:
            html = response.read()
        finally:
            response.close()
        content_list = self._CONTENT_RE.findall(html)
        self.dealPage(content_list)

    def dealPage(self, content_list):
        """Strip HTML markup from each post and write it to the output file.

        content_list: list of raw HTML fragments, one per post.
        """
        for item in content_list:
            # Drop the paragraph / line-break tags; what remains is the text.
            item = item.replace('<p>', '').replace('</p>', '').replace('<br />', '')
            # Re-encode to UTF-8 for the output file.
            # NOTE(review): assumes the site serves GBK — confirm against a
            # live response before changing.
            self.writePage(item.decode('gbk').encode('utf-8'))

    def writePage(self, item):
        """Append one cleaned post to duanzi.txt.

        item: UTF-8 encoded text of a single post.
        """
        print('正在写入数据...')
        # Renamed handle: the original `as file` shadowed the py2 builtin.
        with open('duanzi.txt', 'a') as out:
            out.write(item)

    def startWork(self):
        """Run the interactive crawl loop until the user types 'quit'."""
        while self.switch:
            self.loadPage()
            command = raw_input('如果继续爬取，请按回车（退出输入quit）')
            if command == 'quit':
                self.switch = False
            else:
                # Advance to the next listing page only when continuing;
                # the original also bumped it on quit, to no effect.
                self.page += 1
        print('谢谢使用！')



if __name__ == "__main__":
    # Entry point: build the crawler and hand control to its
    # interactive fetch loop.
    spider = Spider()
    spider.startWork()