#encoding=utf-8

"""
    本代码只匹配第一层网页,未深入网页中链接,此为py3代码
"""

import requests
import re


class NeihanSpider(object):
    """Scraper for joke articles on neihan8.com.

    Only the listing pages are scraped (first level); the links inside
    each page are not followed.
    """

    def __init__(self):
        # Headers that make the request look like a normal IE11 visit
        # (User-Agent + plausible Referer) so the site serves the page.
        self.headers = {
                "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
                "Referer": "https://www.neihan8.com/article/index_2.html",
                "Upgrade-Insecure-Requests": "1"
                }

        self.base_url = "https://www.neihan8.com/article/index_"
        # Matches every joke body on a listing page. re.S (DOTALL) is
        # required so '.' also matches the newlines inside the <div>.
        self.pattern_page = re.compile(r'<div class="desc">(.*?)</div>', re.S)
        # Matches junk to strip from the extracted text: HTML entities
        # (&...;), whitespace (\s) and the full-width ideographic space
        # U+3000.
        self.pattern_content = re.compile(r"&.*;|\s|" + u"\u3000")
        # Listing page currently being scraped (1-based).
        self.page = 1

    def send_request(self, url):
        """Send a GET request to *url* and return the Response.

        A timeout is set so a stalled connection cannot hang the
        scraper forever; main() catches any resulting exception.
        """
        # fixed log message typo: was "正在发送请" (missing 求)
        print("[INFO]正在发送请求: {}".format(url))
        response = requests.get(url, headers=self.headers, timeout=10)
        return response

    def parse_response(self, response):
        """Extract the joke bodies from a Response.

        Returns a list of raw HTML fragments (the inner text of each
        <div class="desc">), still containing entities and whitespace.
        """
        html = response.content.decode("utf-8")
        content_list = self.pattern_page.findall(html)
        return content_list

    def save_content(self, content_list):
        """Append the cleaned jokes of the current page to duanzi.txt.

        The file is opened with an explicit UTF-8 encoding; relying on
        the platform default would raise UnicodeEncodeError for Chinese
        text on non-UTF-8 locales (e.g. Windows cp936/cp1252).
        """
        with open("duanzi.txt", "a", encoding="utf-8") as f:
            f.write("第{}页: \n".format(self.page))
            for content in content_list:
                # Strip entities/whitespace so each joke is one clean line.
                content = self.pattern_content.sub("", content)
                f.write(content)
                f.write("\n")
            f.write("\n")

    def main(self):
        """Interactive driver: scrape one listing page per keypress."""
        while True:
            if input("输入q则退出: ").lower() == "q":
                break
            # Page 1 is served as index.html, later pages as index_N.html.
            url = self.base_url + str(self.page) + ".html"
            if self.page == 1:
                url = self.base_url[:-1] + ".html"
            try:
                response = self.send_request(url)
                content_list = self.parse_response(response)
                self.save_content(content_list)
            except Exception as e:
                # Best-effort: report the failed page and keep going.
                print("[ERROR]:  数据抓取失败{}".format(url))
                print(e)
            self.page += 1

if __name__ == "__main__":
    neihan = NeihanSpider()
    neihan.main()

