#coding:utf-8
import requests
from time import sleep
from lxml import etree
from auth import islogin
from auth import Logging
import requests, cookielib, re, json

# Replace the module-level `requests` name with a Session so every helper
# below shares one connection pool and one persistent cookie jar.
requests = requests.Session()
requests.cookies = cookielib.LWPCookieJar('cookies')
try:
    requests.cookies.load(ignore_discard=True)
except IOError:
    # `LWPCookieJar.load` raises IOError when the 'cookies' file is missing
    # and cookielib.LoadError (an IOError subclass) when it is malformed —
    # both mean "not logged in yet".  A bare `except:` here would also have
    # swallowed KeyboardInterrupt/SystemExit.
    Logging.error(u"你还没有登录知乎哦 ...")
    Logging.info(u"执行 `python auth.py` 即可以完成登录。")
    raise Exception("无权限(403)")

def retry_request(requests, url, **kw):
    """Call `requests` (a bound session method, e.g. session.get) with retries.

    Tries up to 30 times, sleeping 5 seconds after each failed attempt.
    Returns whatever the callable returns on the first success, or None
    when every attempt raised.

    Bug fixed: the original `except` branch returned None immediately,
    so the 30-iteration loop never retried and its `sleep(5)` (placed
    after `return`) was unreachable.
    """
    from time import sleep
    for attempt in range(30):
        try:
            return requests(url, **kw)
        except Exception as e:
            # Treat any failure as transient: log, pause, try again.
            print(e)
            sleep(5)
    return None

def parser_user(url, html):
    """Parse a Zhihu profile page into a flat dict of profile fields.

    url: the profile URL; stored in the record and hashed into url_hash.
    html: raw HTML of the profile page.

    Returns (True, data) on success, or (False, url) when any expected
    element is missing (every bare [i] lookup below can raise IndexError).
    """
    from lxml import etree
    tree = etree.HTML(html)
    try:
        # Sidebar counters; indices 0-3 are consumed below as
        # agree / thanks / fav / share counts.
        desc1 = tree.xpath("//div[@class='zm-profile-module-desc']/span/strong/text()")
        # Navbar tab counters; indices 1-3 are consumed below as
        # ask / answer / article counts (index 0 is skipped).
        desc2 = tree.xpath("//div[@class='profile-navbar clearfix']/a/span/text()")
        # Optional profile fields: each may be absent, hence the
        # len(...)>0 guards when building `data`.
        description_temp = tree.xpath("//div[@class='title-section ellipsis']/span/text()")
        location_temp = tree.xpath("//span[@class='location item']/a/text()")
        business_temp = tree.xpath("//span[@class='business item']/a/text()")
        education_temp = tree.xpath("//span[@class='education item']/a/text()")
        education_extra_temp = tree.xpath("//span[@class='education-extra item']/a/text()")
        employment_temp = tree.xpath("//div[@data-name='employment']/span/span/text()")
        # Gender is encoded in the icon's CSS class, not in text.
        gender_temp = tree.xpath("//span[@class='item gender']/i/@class")
        if len(gender_temp)==0:
            gender = None
        else:
            # NOTE(review): any class other than the male icon is treated
            # as female — unknown icon classes are not distinguished.
            gender = 'male' if gender_temp[0] == 'icon icon-profile-male' else 'female'
        data = dict(
            url=url,
            user_name = tree.xpath("//div[@class='title-section ellipsis']/a/text()")[0],
            description = description_temp[0] if len(description_temp)>0 else None,
            location = location_temp[0] if len(location_temp)>0 else None,
            business = business_temp[0] if len(business_temp)>0 else None,
            education = education_temp[0] if len(education_temp)>0 else None,
            education_extra = education_extra_temp[0] if len(education_extra_temp)>0 else None,
            # NOTE(review): hash() is process-dependent across runs when
            # string hash randomization is enabled — confirm this is only
            # used as a transient key.
            url_hash = hash(url),
            agree_num = int(desc1[0]),
            thanks_num = int(desc1[1]),
            fav_num = int(desc1[2]),
            share_num = int(desc1[3]),
            ask_num = int(desc2[1]),
            ans_num = int(desc2[2]),
            article_num = int(desc2[3]),
            # Sidebar "following" box: [0] is followees, [1] is followers.
            followees_num = int(tree.xpath("//div[@class='zm-profile-side-following zg-clear']/a/strong/text()")[0]),
            followers_num = int(tree.xpath("//div[@class='zm-profile-side-following zg-clear']/a/strong/text()")[1]),
            gender = gender,
            employment = employment_temp[0] if len(employment_temp)>0 else None,
            view_num = int(tree.xpath("//span[@class='zg-gray-normal']/strong/text()")[0])
            )
        return True, data
    except IndexError,e:
        # A required element was missing: report and hand the url back so
        # the caller can retry or record the failure.
        import traceback
        traceback.print_exc()
        print "parser error", e
        return False, url

def parser_answer(url):
    """Collect (question url, agree count) pairs for every answer of a user.

    url: the user's profile URL; the answers listing is paged 20 per page.
    Returns a list of {'url': ..., 'agree_num': ...} dicts, or None on a
    request failure or unexpected page structure.

    Bugs fixed versus the original:
      * `if i > 1` skipped fetching page 2 (i == 1), duplicating page 1.
      * the fetched response for later pages was never re-parsed, so
        `tree` always held page 1 and every page emitted page 1's data.
    """
    from lxml import etree
    r = retry_request(requests.get, url + '/answers?order_by=created')
    if r is None:
        return None
    tree = etree.HTML(r.content)
    return_data = []
    try:
        # Pager links: the second-to-last entry is the last page number;
        # a missing pager means there is only one page.
        page_item = tree.xpath("//div[@class='zm-invite-pager']/span/a/text()")
        if len(page_item) == 0:
            page_num = 1
        else:
            page_num = int(page_item[-2])
        for i in xrange(page_num):
            if i > 0:
                # Pages are 1-based in the query string; page 1 was
                # already fetched above.
                r = retry_request(requests.get, url + '/answers?order_by=created&page=%s' % (i + 1))
                if r is None:
                    return None
                tree = etree.HTML(r.content)
            question_items = tree.xpath("//a[@class='question_link']/@href")
            agree_items = tree.xpath("//a[@class='zm-item-vote-count js-expand js-vote-count']/text()")
            for index in xrange(len(question_items)):
                return_data.append({'url': 'https://www.zhihu.com' + question_items[index],
                                    'agree_num': int(agree_items[index])})
        return return_data
    except IndexError:
        # Page layout did not match the expected structure.
        return None


class User:
    """A single Zhihu user profile, identified by its /people/ URL.

    Lazily fetches and caches the profile page (self.tree) on first use,
    and exposes generators over the user's followers and followees.
    """

    def __init__(self, user_url):
        """Validate and store the profile URL.

        Raises ValueError when the URL is not a zhihu.com/people page
        (also, via str.index, when it contains no '//' scheme marker).
        """
        # startswith(prefix, start): check the host+path directly after '//'.
        if not user_url.startswith('www.zhihu.com/people', user_url.index('//') + 2):
            raise ValueError("\"" + user_url + "\"" + " : it isn't a user url.")
        else:
            self.user_url = user_url


    def parser(self):
        """Fetch the profile page once and cache its parsed tree on self.tree."""
        if hasattr(self, 'tree'):
            return
        r = retry_request(requests.get, self.user_url)
        if r is None:
            raise IndexError("\"" + self.user_url + "\"" + "request error")
        self.tree = etree.HTML(r.content)


    def _iter_follow_urls(self, suffix, node_name, count_index, end_num):
        """Shared pagination engine behind get_followers_url / get_followees_url.

        suffix:      '/followers' or '/followees', appended to the profile URL.
        node_name:   server-side node name for the paged POST endpoint.
        count_index: position of the relevant total in the sidebar counters
                     (0 = followees, 1 = followers).
        end_num:     stop once roughly this many URLs have been yielded.

        Yields profile URL strings.  The first 20 entries come embedded in
        the listing HTML; later pages are fetched 20 at a time via POST.
        """
        if not hasattr(self, 'tree'):
            self.parser()
        total = int(self.tree.xpath("//div[@class='zm-profile-side-following zg-clear']/a/strong/text()")[count_index])
        if total == 0:
            return
        list_url = self.user_url + suffix
        r = retry_request(requests.get, list_url)
        if r is None:
            return
        tree = etree.HTML(r.content)
        # ceil(total / 20) pages; '//' matches the original py2 '/' on ints.
        for index, page in enumerate(xrange((total - 1) // 20 + 1)):
            if index * 20 > end_num:
                break
            if page == 0:
                # First page: entries are already in the fetched HTML.
                for link in tree.xpath("//h2[@class='zm-list-content-title']/a/@href"):
                    yield link
            else:
                post_url = "http://www.zhihu.com/node/" + node_name
                # Anti-CSRF token and the listing's hash_id, both scraped
                # from the first listing page.
                _xsrf = tree.xpath("//input[@name='_xsrf']/@value")[0]
                hash_id = re.findall("hash_id&quot;: &quot;(.*)&quot;},", r.text)[0]
                params = json.dumps({"offset": page * 20, "order_by": "created", "hash_id": hash_id})
                data = {
                    '_xsrf': _xsrf,
                    'method': "next",
                    'params': params
                }
                header = {
                    'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0",
                    'Host': "www.zhihu.com",
                    'Referer': list_url
                }
                r_post = retry_request(requests.post, post_url, data=data, headers=header)
                if r_post is None:
                    # All retries failed: stop rather than crash on None.
                    return
                # "msg" holds one HTML snippet per followed user.
                batch = r_post.json()["msg"]
                if len(batch) == 0:
                    sleep(5)
                    print("request pause")
                for snippet in batch:
                    snippet_tree = etree.HTML(snippet)
                    yield snippet_tree.xpath("//h2[@class='zm-list-content-title']/a/@href")[0]

    def get_followers_url(self, end_num=10000):
        """Yield the profile URLs of this user's followers (up to ~end_num)."""
        return self._iter_follow_urls("/followers", "ProfileFollowersListV2", 1, end_num)

    def get_followees_url(self, end_num=10000):
        """Yield the profile URLs of the users this user follows (up to ~end_num)."""
        return self._iter_follow_urls("/followees", "ProfileFolloweesListV2", 0, end_num)