#encoding: utf-8
'''
Created on 2015年6月28日

@author: LWD
'''
import threading
from time import sleep, time

from src.spider.common.Queue import Queue
from src.spider.izhihu.user import User
from src.store.PMySql import PMysql

class UserSpider(threading.Thread):        
    
    def run(self):
        global mutex
        global crawlered_set
        global waiting_for_crawler_queue
        
        sql_tpl = "INSERT INTO user_info (user_id, user_name, followees, followers, agrees, thanks, asks, answers, collections, gender, introduction, company, location, business, position, description, education, major) \
                       VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"

        # 数据库连接池  获取数据库对象
        mysql = PMysql()
        
        # 初始化 计时 30s
        endtime = time() + 10
        while True:
            # 从队列取一个用户id
            try:
                mutex.acquire()
                if waiting_for_crawler_queue.qsize() > 0:
                    user_id = waiting_for_crawler_queue.get()
                    crawlered_set.add(user_id)
                else:
                    # 如果队列是空，等待30s, 每秒检查一次 
                    # 如果 30s后还是空，则跳出循环
                    remaining = endtime - time()
                    if remaining <= 0.0:
                        break
                    else:
                        continue
            finally:
                mutex.release()
            # 重置计时 30s
            endtime = time() + 10
            # 获取用户信息  并存储
            user = User("http://www.zhihu.com/people/"+user_id)
            
            user_name = user.get_user_id()
            followees = user.get_followees_num()
            followers = user.get_followers_num()
            agrees = user.get_agree_num()
            thanks = user.get_thanks_num()
            asks = user.get_asks_num()
            answers = user.get_answers_num()
            collections = user.get_collections_num()
            gender = user.get_gender()
            introduction = user.get_introduction()
            company = user.get_company()
            location = user.get_location()
            business = user.get_business()
            position = user.get_position()
            description = user.get_description()
            education = user.get_education()
            major = user.get_major()
            
            print self.name + user_name
            
            value = (user_id, user_name, followees, followers, agrees, thanks, asks, answers, collections, gender, introduction, company, location, business, position, description, education, major)
            print value
            print introduction
            print sql_tpl%value
            mysql.insertOne(sql_tpl, value)
            
            # 将用户的关注者或者用户关注的人加入到队列中 （没有爬取过的用户）
            followees = user.get_followees()
            followers = user.get_followers()
            for fuser_id in followees:
                try:
                    mutex.acquire()
                    # 判断不在 集合 和 队列中
                    if fuser_id not in crawlered_set and fuser_id not in waiting_for_crawler_queue:
                        # 如果限制了 集合的大小，将关注者 和 关注的人 数比较多的用户放到集合中，没访问一次，其value减一
                        # 这样就保证最可能访问user_id在集合中，其他查询数据库判断是否应爬取过 
                        # 此处没有限制集合大小
                        #res = mysql.getOne("select * from user_info where user_id=%s", fuser_id)
                        # 判断数据库中不存在
                        #if len(res) == 0:
                        waiting_for_crawler_queue.put(fuser_id)
                        print fuser_id
                finally:
                    mutex.release()
            for fuser_id in followers:
                try:
                    mutex.acquire()
                    # 判断不在 集合 和 队列中
                    if fuser_id not in crawlered_set and fuser_id not in waiting_for_crawler_queue:
                        # 如果限制了 集合的大小，将关注者 和 关注的人 数比较多的用户放到集合中，没访问一次，其value减一
                        # 这样就保证最可能访问user_id在集合中，其他查询数据库判断是否应爬取过 
                        # 此处没有限制集合大小
                        #res = mysql.getOne("select * from user_info where user_id=%s", fuser_id)
                        # 判断数据库中不存在
                        #if len(res) == 0:
                        waiting_for_crawler_queue.put(fuser_id)
                        print fuser_id
                finally:
                    mutex.release()
        
        
        mysql.dispose() 
    
    

# Lock guarding the two shared containers below; every spider thread must
# hold it before reading or mutating them.
mutex = threading.Lock()
# User ids that have already been taken off the queue for crawling.
crawlered_set = set()
# Crawl frontier: user ids waiting to be fetched (project-local Queue).
waiting_for_crawler_queue = Queue()
     
if __name__ == '__main__':
    # Load database connection settings before any spider thread starts.
    PMysql.read_options_config()

    # Seed the crawl frontier with a single known user id.
    waiting_for_crawler_queue.put("liu-sheng-2-22")

    # Spin up four concurrent spider threads.
    for _ in xrange(4):
        UserSpider().start()