import os
import threading
import re
import requests
from lxml import etree
from queue import Queue


class TiebaSpider:
    """Multi-threaded crawler for Baidu Tieba (mobile site).

    Pipeline stages run as daemon threads and hand work along queues:
        url_quene -> forum_page_quene -> forum_list_quene
        -> forum_detail_quene -> file_quene -> text files on disk
    (Attribute names keep the original 'quene' spelling so existing
    callers that inspect the queues keep working.)
    """

    def __init__(self, tieba_name, page_count=10):
        # Name of the forum (tieba) to crawl; also used as output dir name.
        self.tieba_name = tieba_name
        # Number of listing pages to enqueue; each page holds 20 posts.
        # Defaults to 10 to preserve the original behavior.
        self.page_count = page_count
        self.start_url = 'http://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/m?kw={}&pn={}'
        self.part_url = 'http://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Mobile Safari/537.36"
        }
        # Replaces every character that is not ASCII-alphanumeric or a CJK
        # ideograph, so post titles become safe file names.
        self.file_name_pattern = re.compile(r'[^a-zA-Z0-9\u4e00-\u9fa5]')
        self.url_quene = Queue()
        self.forum_page_quene = Queue()
        self.forum_list_quene = Queue()
        self.forum_detail_quene = Queue()
        self.file_quene = Queue()

    def get_url_list(self):
        """Build the URL of every listing page and enqueue it."""
        for i in range(self.page_count):
            # 'pn' paginates in steps of 20 posts per page.
            self.url_quene.put(self.start_url.format(self.tieba_name, i * 20))

    def get_forum_page(self):
        """Worker: download each listing page and enqueue its raw HTML."""
        while True:
            url = self.url_quene.get()
            response = requests.get(url, headers=self.headers)
            self.forum_page_quene.put(response.content)
            self.url_quene.task_done()

    def get_forum_list(self):
        """Worker: extract post links from a listing page and enqueue them."""
        while True:
            html_str = self.forum_page_quene.get()
            element = etree.HTML(html_str)
            div_list = element.xpath('//div[contains(@class,"i")]')
            for div in div_list:
                # BUGFIX: the original indexed [0] before checking that the
                # xpath result was non-empty (it tested len() of the *string*),
                # so a div without a link raised IndexError.
                href_list = div.xpath('./a/@href')
                if href_list:
                    self.forum_list_quene.put(self.part_url + href_list[0])
            self.forum_page_quene.task_done()

    def get_forum_detail_list(self):
        """Worker: download each post page and enqueue its raw HTML."""
        while True:
            forum_url = self.forum_list_quene.get()
            response = requests.get(forum_url, headers=self.headers)
            self.forum_detail_quene.put(response.content)
            self.forum_list_quene.task_done()

    def parse_forum_detail(self):
        """Worker: parse a post page into {file_name, file_data} and follow
        the in-post "next page" link by re-enqueueing it."""
        while True:
            forum_detail = self.forum_detail_quene.get()
            element = etree.HTML(forum_detail)
            # Guard the title lookup: an unexpected page layout previously
            # raised IndexError and silently killed this worker thread.
            title_list = element.xpath('//div[@class="bc p"]/strong/text()')
            if title_list:
                file_name = self.file_name_pattern.sub('_', title_list[0]) + '.txt'
                details = element.xpath('//div[@class="i"]/text()')
                self.file_quene.put({
                    'file_name': file_name,
                    'file_data': details,
                })
                # Follow pagination within the post (the "next page" anchor).
                next_url = element.xpath('//a[text()="下一页"]/@href')
                if next_url:
                    self.forum_list_quene.put(self.part_url + next_url[0])
            self.forum_detail_quene.task_done()

    def save_file(self):
        """Worker: append post text to ./txt/<tieba_name>/<title>.txt."""
        while True:
            file_data = self.file_quene.get()
            file_path = './txt/{}/'.format(self.tieba_name)
            # BUGFIX: os.mkdir fails when the parent './txt' does not exist;
            # makedirs creates intermediate directories and exist_ok removes
            # the check-then-create race.
            os.makedirs(file_path, exist_ok=True)
            file_name = file_path + file_data['file_name']
            with open(file_name, 'a', encoding='utf-8') as f:
                for detail in file_data['file_data']:
                    # Strip embedded newlines/spaces; one source line per row.
                    f.write(detail.replace('\n', '').replace(' ', '') + '\n')
            self.file_quene.task_done()

    def run(self):
        """Start one daemon thread per pipeline stage, then block until
        every queue has been fully processed."""
        targets = [
            self.get_url_list,
            self.get_forum_page,
            self.get_forum_list,
            self.get_forum_detail_list,
            self.parse_forum_detail,
            self.save_file,
        ]
        for target in targets:
            t = threading.Thread(target=target)
            # Daemon threads let the process exit after the queues drain.
            # (t.daemon replaces the deprecated setDaemon() call.)
            t.daemon = True
            t.start()

        for q in [self.url_quene, self.forum_page_quene, self.forum_list_quene,
                  self.forum_detail_quene, self.file_quene]:
            q.join()

        print('over')


if __name__ == '__main__':
    # Crawl the 'LOL' forum when executed as a script.
    TiebaSpider('LOL').run()
