import threading
import time
import requests
from lxml import etree
from queue import Queue
from pymysql import connect
from random import randint


class TiebaSpider:
    """Multi-threaded crawler that collects poster usernames from a Baidu Tieba forum.

    Pipeline (queues connect stages; each stage runs in daemon worker threads):

        url_quene --(get_forum_page)--> forum_page_quene --(get_forum_list)-->
        file_quene --(save_file)--> MySQL

    Thread-detail URLs discovered on list pages flow through
    forum_detail_quene --(parse_forum_detail)--> file_quene as well.
    """

    def __init__(self, tieba_name):
        # Forum (tieba) name to crawl, e.g. '太原理工大学'.
        self.tieba_name = tieba_name
        # Mobile-site list page: kw = forum name, pn = post offset (20 per page).
        self.start_url = 'http://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/m?kw={}&pn={}'
        # Prefix for the relative hrefs found on list pages.
        self.part_url = 'http://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Mobile Safari/537.36"
        }
        # Work queues wiring the pipeline stages together.
        self.url_quene = Queue()           # list-page URLs to fetch
        self.forum_page_quene = Queue()    # raw HTML of fetched list pages
        self.file_quene = Queue()          # batches of usernames to persist
        self.forum_detail_quene = Queue()  # thread-detail URLs to fetch
        # NOTE: no shared MySQL connection is kept here — pymysql connections
        # are not thread-safe, so each save_file() worker opens its own.
        # (The old code opened one here and never used it: a leaked resource.)

    # Build and enqueue the URL of every list page of the forum.
    def get_url_list(self):
        """Enqueue list-page URLs; pn advances by 20 posts per page."""
        for page in range(20000):
            self.url_quene.put(self.start_url.format(self.tieba_name, page * 20))

    # Fetch each list page's HTML.
    def get_forum_page(self):
        """Worker loop: download list pages and enqueue their raw HTML."""
        while True:
            url = self.url_quene.get()
            response = requests.get(url, headers=self.headers)
            # Sleep a few seconds between requests to avoid an IP ban.
            time.sleep(randint(1, 3))
            self.forum_page_quene.put(response.content)
            self.url_quene.task_done()

    # Extract poster nicknames (and detail-page URLs) from each list page.
    def get_forum_list(self):
        """Worker loop: parse a list page into a batch of nicknames.

        Also enqueues each post's detail-page URL for parse_forum_detail.
        """
        while True:
            html_str = self.forum_page_quene.get()
            element = etree.HTML(html_str)
            div_list = element.xpath('//div[contains(@class,"i")]')
            nicknames = []
            for div in div_list:
                texts = div.xpath('./p/text()')
                if texts:
                    parts = texts[0].split()
                    # The nickname is the third whitespace-separated token;
                    # skip rows that don't match the expected layout instead
                    # of crashing with IndexError like the old code did.
                    if len(parts) > 2:
                        nicknames.append(parts[2])
                # Bug fix: test the xpath result BEFORE indexing it — the old
                # code evaluated `div.xpath('./a/@href')[0]` inside the length
                # check and raised IndexError on divs without a link.
                hrefs = div.xpath('./a/@href')
                if hrefs:
                    self.forum_detail_quene.put(self.part_url + hrefs[0])
            self.file_quene.put(nicknames)
            self.forum_page_quene.task_done()

    # Extract reply usernames from thread-detail pages (with pagination).
    def parse_forum_detail(self):
        """Worker loop: parse a detail page, follow its '下一页' link."""
        while True:
            forum_detail_url = self.forum_detail_quene.get()
            response = requests.get(forum_detail_url, headers=self.headers)
            element = etree.HTML(response.content)
            usernames = []
            for div in element.xpath('//div[contains(@class,"i")]'):
                cells = div.xpath('./table/tr/td[@class="l"]/span/a/text()')
                if cells:  # guard: some rows carry no username link
                    usernames.append(cells[0])
            # Only batches with more than one name are worth saving
            # (a single entry is just the original poster again).
            if len(usernames) > 1:
                self.file_quene.put(usernames)
            # Follow pagination: re-enqueue the next page of this thread.
            next_url = element.xpath('//a[text()="下一页"]/@href')
            if len(next_url) > 0:
                self.forum_detail_quene.put(self.part_url + next_url[0])
            self.forum_detail_quene.task_done()
            time.sleep(randint(1, 3))

    # Persist username batches into MySQL.
    def save_file(self):
        """Worker loop: insert each username into ad_usernames.

        Each worker thread opens its own connection, because pymysql
        connections must not be shared across threads.
        """
        conn = connect(host='localhost', port=3306, user='root', password='root',
                       database='kela_app', charset='utf8')
        cursor = conn.cursor()
        # Security fix: parameterized query. The old string-formatted SQL
        # ('... values ("{}")'.format(username)) was injectable — a username
        # containing a double quote would break or rewrite the statement.
        sql = 'insert into ad_usernames (username) values (%s)'
        while True:
            usernames = self.file_quene.get()
            try:
                for username in usernames:
                    cursor.execute(sql, (username,))
                    conn.commit()
                    print('现在是第{}条'.format(cursor.lastrowid))
            except Exception as e:
                # Best-effort persistence: log and keep the worker alive.
                print(e)
            self.file_quene.task_done()

    def run(self):
        """Start all worker threads, then block until every queue drains."""
        thread_list = [threading.Thread(target=self.get_url_list)]
        # Three workers per pipeline stage.
        for target in (self.get_forum_page, self.parse_forum_detail,
                       self.get_forum_list, self.save_file):
            for _ in range(3):
                thread_list.append(threading.Thread(target=target))

        for t in thread_list:
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()

        # Bug fix: join file_quene as well — the old code joined the unused
        # forum_list_quene instead, so 'over' could print (and the process
        # exit, killing the daemon threads) while saves were still pending.
        for q in (self.url_quene, self.forum_page_quene,
                  self.forum_detail_quene, self.file_quene):
            q.join()

        print('over')


if __name__ == '__main__':
    # Script entry point: crawl the 太原理工大学 forum.
    TiebaSpider('太原理工大学').run()
