from ua_info import ua_list
import time
from urllib import request, parse
import random

class myData(object):
    """Simple Baidu Tieba scraper: fetches keyword search pages and saves each as an HTML file."""

    # 1. Initialize the base URL template; '{}' is filled with the urlencoded query string.
    def __init__(self):
        self.url = 'http://tieba.baidu.com/f?{}'

    # 2. Fetch *url* with a randomized User-Agent and return the decoded HTML.
    def getHtml(self, url):
        req = request.Request(url=url, headers={"User-Agent": random.choice(ua_list)})
        # Use a context manager so the connection is closed even on error
        # (the original leaked the response object).
        with request.urlopen(req) as res:
            # NOTE(review): tieba.baidu.com generally serves UTF-8 today; decoding
            # as "gbk" with errors ignored may yield mojibake — confirm the page
            # charset before relying on the saved files. Kept as-is to preserve
            # existing behavior.
            html = res.read().decode("gbk", "ignore")
        return html

    # 2.1 Placeholder for further parsing of the fetched HTML (not implemented yet).
    def parseHtml(self):
        pass

    # 3. Persist *html* to *filename*.
    def saveHtml(self, filename, html):
        # Pin the encoding explicitly: the platform/locale default (e.g. gbk on
        # Chinese Windows, cp1252 elsewhere) may not represent the page and would
        # raise UnicodeEncodeError or produce different files per machine.
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(html)

    # 4. Ask the user for a keyword and a page range, then fetch and save each page.
    def run(self):
        myTitle = input("请输入在贴吧要查找的内容：")
        begin = int(input("请输入起始页："))
        end = int(input("请输入终止页："))

        # Fetch every page in the INCLUSIVE range [begin, end].
        # BUG FIX: range(begin, end) silently skipped the final page the user asked for.
        for page in range(begin, end + 1):
            # Tieba paginates via the 'pn' offset query argument.
            # NOTE(review): stride 30 kept from the original — verify it matches
            # the site's actual posts-per-page before trusting page boundaries.
            pn = (page - 1) * 30
            params = parse.urlencode({
                'kw': myTitle,
                'pn': str(pn)
            })
            # Build the final request URL from the template.
            url = self.url.format(params)
            # Fetch the page. (Removed two leftover debug prints of the raw template.)
            myHtml = self.getHtml(url)

            # Save each page to its own file, named after the keyword and page number.
            filename = "{}-{}页.html".format(myTitle, page)
            self.saveHtml(filename, myHtml)

            print("第%d页，获取成功" % page)
            # Sleep 1–2 seconds between requests to be polite to the server.
            time.sleep(random.randint(1, 2))

if __name__ == '__main__':
    # Time the whole scraping session.
    start = time.time()

    mySp = myData()
    mySp.run()

    elapsed = time.time() - start
    # Total crawler run time.
    # BUG FIX: '%2f' is a minimum field WIDTH of 2, not 2 decimal places, so the
    # original printed full float precision; '%.2f' gives the intended 2 decimals.
    print("执行时间：%.2f" % elapsed)
