#encoding:utf-8
'''
Created on 2015年7月13日

@author: LWD
'''
import os
import threading
from src.spider.izhihu.question import Question
from src.store.PMySql import PMysql
from src.spider.common.CQueue import CQueue
from src.spider.izhihu.topic import Topic

class Question_Spider(threading.Thread):
    '''
    @summary: Worker thread: repeatedly claims the next listing page of the
              root topic, fetches its questions, and stores the ones not yet
              crawled into the `question` table.
              Shared state (page counter, dedupe queue) lives at module level
              and is guarded by mutex_page / mutex_queue.
    '''

    def run(self):
        '''
        @summary: Main crawl loop; exits when a page returns no questions.
        '''
        global mutex_page
        global mutex_queue
        global queue
        global page
        # Borrow a DB handle from the connection pool
        mysql = PMysql()
        sql_tpl = "INSERT INTO question (id, uid, title, detail, topic, answers, followers, ctime, state, visitTime) \
                       VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
        # Root topic whose question listing is paginated
        rootTopic = Topic("www.zhihu.com/topic/19776749/questions")
        try:
            while True:
                # Claim the next page number atomically.
                # acquire() sits OUTSIDE the try so a failed acquire cannot
                # reach release() on an unheld lock.
                mutex_page.acquire()
                try:
                    current_page = page
                    page += 1
                finally:
                    mutex_page.release()

                # Fetch the question ids listed on this page
                questions = rootTopic.get_questions(current_page)
                # An empty page means we ran past the end: stop this worker
                if not questions:
                    break

                ##
                # Deduplicate against the shared queue
                ##
                # Ids on this page that have not been crawled yet
                rquestion_list = []
                mutex_queue.acquire()
                try:
                    # Only bookkeeping under the lock — no network fetches here
                    for q_id in questions:
                        if q_id not in queue:
                            rquestion_list.append(q_id)
                            queue.enqueue(q_id)
                finally:
                    mutex_queue.release()

                # Crawl and store each new question; no shared state touched,
                # so no lock is needed.
                for q_id in rquestion_list:
                    # BUG FIX: build a Question per id. The original built
                    # Question objects in the dedupe loop and this loop reused
                    # the LAST one, storing the wrong data for every q_id.
                    question = Question("http://www.zhihu.com/question/" + q_id)
                    q_title = question.get_title()
                    q_detail = question.get_detail()
                    answers_num = question.get_answers_num()
                    followers_num = question.get_followers_num()
                    (author, ctime) = question.get_question_author_and_ctime()
                    state = question.get_question_state()
                    visit_times = question.get_visit_times()
                    topics = question.get_topics()
                    # Persist the question row
                    value = (q_id, author, q_title, q_detail, topics, answers_num, followers_num, ctime, state, visit_times)
                    mysql.insertOne(sql_tpl, value)

        finally:
            # Return the DB handle to the pool even on error
            mysql.dispose()

# Mutexes shared between the crawler threads:
# mutex_page guards the `page` counter, mutex_queue guards `queue`.
mutex_page = threading.Lock()
mutex_queue = threading.Lock()
# Holds the most recently crawled question ids (up to 5 * threads * 20),
# used to detect new questions shifting the pagination from its initial state.
queue = CQueue()
# All question ids (20) from page 1 at crawl start; filled once in __main__
# and presumably not modified afterwards — TODO confirm no other writer.
firstPage = []
# Next listing page to crawl (threads claim and increment it under mutex_page)
page = 1
# Number of crawler threads to launch
threads = 5

if __name__ == '__main__':
    '''
    @summary: 初始添加根话题到队列，开启线程爬取话题
    '''
    print  os.path.dirname(os.path.abspath(__file__))
    
    PMysql.read_options_config()
    # 爬取第一页
    rootTopic = Topic("www.zhihu.com/topic/19776749/questions")
    # 存储到 firstPage 和 queue
    questions = rootTopic.get_questions(1)
    for q_id in questions:
        queue.enqueue(q_id)
        firstPage.append(q_id)
    
    for i in xrange(threads):
        p = Question_Spider()
        p.start()
    