#encoding:utf-8
'''
Created on 2015年7月9日

@author: LWD
'''

import sys
from session import create_session
from bs4 import BeautifulSoup
from src.spider.common.HashTable import HashTable

# NOTE(review): the Python 2 idiom is `reload(sys)` immediately followed by
# `sys.setdefaultencoding("utf-8")`; without that second call this reload is
# effectively a no-op — confirm intent (the file does many .encode("utf-8")).
reload(sys)
# Module-level shared web session, lazily created (and logged in) by Topic
# methods via create_session().
session = None

class Topic:
    '''
    @summary: A Zhihu topic page (e.g. http://www.zhihu.com/topic/19776749).
    Provides scrapers for the topic's title, description, question list,
    parent/child topics, follower count and last-edit time.  All network
    traffic goes through the shared module-level `session`.
    '''

    # Class-level defaults so attribute access is safe before parser() runs.
    url = None
    soup = None

    # Canonical topic URL prefix; everything after it is the topic id.
    URL_PREFIX = "http://www.zhihu.com/topic/"

    def __init__(self, url, topic=None, hasChild=True):
        '''
        @summary: Constructor (e.g. http://www.zhihu.com/topic/19776749)
        @param url: full topic URL; must start with http://www.zhihu.com/topic/
        @param topic: optional topic name (unicode); stored utf-8 encoded
        @param hasChild: whether this topic is known to have child topics
        @raise ValueError: when url is not a topic URL
        '''
        # BUGFIX: the original compared url[0:len(url)-8] against the prefix,
        # which only works when the topic id is exactly 8 characters long.
        if not url.startswith(self.URL_PREFIX):
            raise ValueError("\"" + url + "\"" + " : it isn't a topic url.")
        self.url = url
        self.hasChild = hasChild
        # Everything after the prefix is the topic id (any length).
        self.topic_id = url[len(self.URL_PREFIX):]
        if topic is not None:
            self.topic = topic.encode("utf-8")

    def _get_session(self):
        '''
        @summary: Return the shared module-level session, creating one
        (which logs in) on first use.
        '''
        global session
        if session is None:
            session = create_session()
        return session

    def parser(self):
        '''
        @summary: Download self.url and parse it with BeautifulSoup;
        the parsed document is cached in self.soup.
        '''
        s = self._get_session()
        r = s.get(self.url)
        self.soup = BeautifulSoup(r.content)

    def get_topic_id(self):
        '''
        @summary: Topic id (parsed from the URL); 0 when there is no URL.
        @return: topic id (string) or 0
        '''
        if hasattr(self, "topic_id"):
            return self.topic_id
        if self.url is None:
            return 0
        return self.url[len(self.URL_PREFIX):]

    def _get_element_from_content(self, label, lclass, flag=0):
        '''
        @summary: Find tag(s) by name and CSS class in the parsed page.
        @param label: HTML tag name to look for
        @param lclass: CSS class the tag must carry
        @param flag: 0 -> single result (find); non-zero -> all results (find_all)
        @return: the match(es), or None when nothing matched or there is no URL
        '''
        if self.url is None:
            print("I'm anonymous user.")
            return None
        # Lazily download and parse the page on first access.
        if self.soup is None:
            self.parser()
        if flag == 0:
            res = self.soup.find(label, class_=lclass)
        else:
            res = self.soup.find_all(label, class_=lclass)
        # Normalize "no match" ([] or None) to None for all callers.
        return res if res else None

    def get_topic_title(self):
        '''
        @summary: Topic name; empty string on failure.
        @return: topic name (utf-8 encoded str)
        '''
        if hasattr(self, "topic"):
            return self.topic
        title = self._get_element_from_content("h1", lclass="zm-editable-content")
        # Guard title.string too: it is None when the tag has nested markup.
        if title is None or title.string is None:
            return ""
        return title.string.encode("utf-8")

    def get_questions(self, page=1):
        '''
        @summary: Question ids listed on the given page of this topic.
        @param page: 1-based page number; values < 1 fall back to page 1
        @return: list of question id strings
        '''
        s = self._get_session()
        current_page = str(page if page > 0 else 1)
        r = s.get(self.url + "/questions?page=" + current_page)
        soup = BeautifulSoup(r.content)
        # hrefs look like /question/31880936 -> strip the leading "/question/".
        return [link["href"][10:]
                for link in soup.find_all("a", class_="question_link")]

    def get_topic_description(self):
        '''
        @summary: Topic description; empty string on failure.
        @return: description (utf-8 encoded str)
        '''
        description = self._get_element_from_content("div", lclass="zm-editable-content")
        if description is None or description.string is None:
            return ""
        return description.string.encode("utf-8")

    def get_parent_topic(self):
        '''
        @summary: Parent topic name(s); a topic can have several.
        @return: list of parent topic names (utf-8), or "" when the
        parent-topic section is missing
        '''
        parent_items = self._get_element_from_content("div", lclass="zm-side-section-inner parent-topic")
        if parent_items is None:
            return ""
        parents = parent_items.find_all("a", class_="zm-item-tag")
        # find_all returns [] (never None) when nothing matches, so no extra
        # None-guard is needed; an empty list falls through naturally.
        return [p.string.encode("utf-8").replace("\n", "") for p in parents]

    def get_children_topic(self):
        '''
        @summary: Generator yielding child topics in batches: each yield is a
        list of (topic_id, hasChild) tuples, obtained by paging through the
        topic's /organize/entire endpoint.
        '''
        # Nothing to fetch when the topic is known to be childless.
        if not self.hasChild:
            yield []
            # BUGFIX: the original fell through and fetched children anyway.
            return

        s = self._get_session()
        entire_url = self.url + "/organize/entire"
        r = s.get(entire_url)
        soup = BeautifulSoup(r.content)

        child_items_num_text = soup.find("a", class_="zg-link-litblue zm-topic-side-title-link").string
        if child_items_num_text == "查看完整话题结构 »":
            # Generic link text: the page exposes no count, assume 20.
            child_items_num = 20
        else:
            # Link text looks like: 共有 28977 个子话题，查看话题结构 »
            # Strip the 3 leading and 15 trailing characters around the count.
            child_items_num = int(child_items_num_text[3:][:-15])

        # First request sends an empty child id; each later request resumes
        # from the last child id seen in the previous batch.
        lastChild = ""
        # BUGFIX: the original hard-coded topic 19776751 in the POST url path
        # and the Referer header; use this topic's own id/url instead.
        post_url_tpl = self.URL_PREFIX + self.topic_id + "/organize/entire?child=%s&parent=" + self.topic_id
        data = {
            '_xsrf': "",
        }
        # POST headers mimicking Firefox 38 on Windows 7.
        header = {
            'User-Agent': "Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0",
            'Host': "www.zhihu.com",
            'Referer': entire_url,
        }
        total = 0
        while total < child_items_num:
            children_list = []
            post_url = post_url_tpl % lastChild
            # The only POST parameter is the CSRF token scraped from the page.
            # BUGFIX: the original trailing comma stored a 1-tuple here.
            data['_xsrf'] = soup.find("input", attrs={'name': '_xsrf'})["value"]

            r_post = s.post(post_url, data=data, headers=header)
            children = r_post.json()["msg"]
            # The endpoint does not always return 10 topics per call, so
            # count what actually arrived.
            length = len(children[1])
            if length == 0:
                break
            for j in xrange(min(child_items_num - total, length)):
                item = children[1][j]
                if item[0][0] == "topic":
                    topic_id = item[0][2]
                    lastChild = topic_id
                    # A non-empty second element means this child has children.
                    hasChild = len(item[1]) != 0
                    total += 1  # running count of children fetched
                    children_list.append((topic_id, hasChild))
            yield children_list

    def get_followers_num(self):
        '''
        @summary: Number of followers of this topic.
        @return: follower count (int); 0 when the element is missing
        '''
        follower_num = self._get_element_from_content("div", lclass="zm-topic-side-followers-info")
        if follower_num is None:
            return 0
        return int(follower_num.a.strong.string)

    def get_time(self):
        '''
        @summary: Last-edit time of the topic, read from its /log page.
        @return: time string (YYYY-MM-DD hh:mm:ss) or "" on failure
        '''
        s = self._get_session()
        r = s.get(self.url + "/log")
        soup = BeautifulSoup(r.content)
        time_items = soup.find_all("div", class_="zm-item-meta")
        # BUGFIX: find_all returns [] (never None); guard the empty case so
        # time_items[0] cannot raise IndexError.
        if not time_items:
            return ""
        return time_items[0].time.string
        
        