import re
import requests
import lxml
from lxml import etree
from pymongo import MongoClient

class BaiduTieba(object):
    """Scrape thread links from a Baidu Tieba forum and store them in MongoDB.

    Walks the forum's paginated listing starting from page one, extracts each
    thread's title and URL, and inserts one document per thread into the
    ``baidu.tiezi`` collection on a local MongoDB instance.
    """

    def __init__(self, name):
        """Prepare the HTTP session, start URL, and MongoDB collection.

        :param name: forum keyword (becomes the ``kw`` query parameter).
        """
        self.get_url = "http://tieba.baidu.com/f?kw={}".format(name)
        self.spiders_object = requests.Session()
        # Legacy IE user agent: Baidu serves the old non-JS markup to old
        # browsers, which is the markup the XPath expressions below expect.
        self.spiders_object.headers = {
            "User-Agent": "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"
        }
        # MongoDB handle: database "baidu", collection "tiezi".
        self.client = MongoClient("localhost", 27017)
        self.collection = self.client["baidu"]["tiezi"]
        self.data_num = 0  # running count of inserted documents

    def get_html(self):
        """Download the current listing page and return its decoded text."""
        return self.spiders_object.get(self.get_url).content.decode()

    def dispose_data(self, data):
        """Parse one listing page and advance the pagination cursor.

        Extracts the thread anchor elements, then updates ``self.get_url`` to
        the next page's URL — or to ``None`` on the last page, so ``start``
        can terminate cleanly.

        :param data: HTML text of one listing page.
        :return: list of ``<a>`` elements, one per thread.
        """
        html = etree.HTML(data)
        xpath_data = html.xpath("""//li[@class=" j_thread_list clearfix"]/div/div[2]/div[1]/div/a""")
        # BUGFIX: the original indexed [0] unconditionally and raised
        # IndexError on the last page; guard and signal "no more pages".
        next_links = html.xpath("//a[@class='next pagination-item ']/@href")
        self.get_url = "http:" + next_links[0] if next_links else None
        return xpath_data

    def import_mongodb(self, data):
        """Insert one document per thread anchor into MongoDB.

        Each document carries ``url`` (absolute thread URL) and ``title``.
        Failures are logged and abort the current batch without propagating.
        """
        try:
            for item in data:
                self.collection.insert_one({
                    "url": "http://tieba.baidu.com" + item.get("href"),
                    "title": item.get("title"),
                })
                self.data_num += 1
                print('当前抓取数据量%s' % self.data_num)
        # BUGFIX: catch Exception, not BaseException — BaseException also
        # swallows KeyboardInterrupt/SystemExit, making Ctrl-C ineffective.
        except Exception as exc:
            print("import_mongodb failed: %s" % exc)

    def downloader(self, data):
        """Placeholder for a future page-content download step."""
        pass

    def start(self):
        """Crawl listing pages until there is no "next page" link.

        BUGFIX: the original loop checked ``self.get_url == []``, which is
        always False (a string never equals a list), so the crawl could only
        end by crashing inside ``dispose_data``. The loop now stops when
        ``dispose_data`` sets ``self.get_url`` to ``None`` on the last page.
        """
        while self.get_url:
            print("开始处理页面：%s" % self.get_url)
            # Download the current listing page.
            data = self.get_html()
            # Parse it; this also advances self.get_url (None on last page).
            mongodb_data = self.dispose_data(data)
            # Persist the extracted threads.
            self.import_mongodb(mongodb_data)


if __name__ == '__main__':
    # Use a distinct local name so the instance does not shadow the class.
    spider = BaiduTieba(name='music')
    spider.start()