#encoding=utf-8

import urllib.request
import urllib.parse


class TiebaSpider(object):
    """Crawl a range of pages of a Baidu Tieba forum and save each page's HTML to disk."""

    def __init__(self):
        # Pretend to be a regular browser so the server does not reject the request.
        self.headers = {"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"}
        self.base_url = "https://tieba.baidu.com/f?"
        self.tieba_name = input("请输入需要抓取的贴吧:")
        # User enters 1-based page numbers; pages are 0-indexed internally.
        self.start = int(input("请输入起始页码:"))-1
        self.end = int(input("请输入终止页码:"))

    def send_request(self, url):
        """Send an HTTP GET request to `url` and return the open response.

        The caller is responsible for closing the returned response
        (e.g. by using it as a context manager, as begin() does).
        """
        request = urllib.request.Request(url, headers=self.headers)

        print("[INFO]: 正在发送请求 {}".format(url))  # TODO: switch to the logging module later
        # `timeout` makes urlopen raise socket.timeout after 3 seconds instead of
        # hanging forever; the exception is handled by the caller in begin().
        # (Alternatives: a global socket default timeout, or a Timer watchdog.)
        response = urllib.request.urlopen(request, timeout=3)
        return response

    def save_response(self, response_str, page):
        """Write the raw response bytes for `page` (0-based) to
        '<forum name><page+1>.html' in the current directory."""
        with open(self.tieba_name + str(page+1) + ".html","wb") as f:
            f.write(response_str)

    def begin(self):
        """Fetch every page in [start, end), saving each one; log and skip failures."""
        for page in range(self.start, self.end):
            # Tieba paginates via `pn` in steps of 50 posts per page.
            url_dict = {
                    "kw": self.tieba_name,
                    "pn": 50*page
                    }
            url_str = urllib.parse.urlencode(url_dict)
            full_url = self.base_url + url_str

            try:
                # Fix: close the HTTP response after reading it — the original
                # code leaked the connection on every iteration.
                with self.send_request(full_url) as response:
                    self.save_response(response.read(), page)
            except Exception as e:
                print("[ERROR]: 页面抓取失败 {}".format(full_url))  # TODO: switch to the logging module later
                print(e)


if __name__ == "__main__":
    # Entry point: prompt for forum name / page range, then start crawling.
    TiebaSpider().begin()

