# -*- coding: UTF-8 -*-

import requests
import cookielib
from bs4 import BeautifulSoup
import json
import re
from util import mqUtil
import time

# Shared HTTP session used by every request in this module so all fetches
# carry the same authenticated Weibo cookies.
session=requests.session()
# Restore login cookies from the local "logincookies" file (presumably written
# by a separate login script -- the file must already exist or load() raises
# IOError). ignore_discard/ignore_expires keep session cookies that would
# otherwise be dropped on save/load.
session.cookies=cookielib.LWPCookieJar()
session.cookies.load(filename="logincookies", ignore_discard=True, ignore_expires=True)

def getWbSize(content):
    """Return the number of feed pages (45 posts per page) for a profile.

    Parameters:
        content: profile page HTML. Counts are embedded as JS-escaped
            markup like ``>123<\/strong>``; the third match is the total
            weibo count (index 2, after the follow/follower counts).

    Returns:
        Integer page count, rounded up (0 when the user has no posts).

    Raises:
        IndexError: if fewer than three escaped counts appear in content.
    """
    wbSize = int(re.findall(r'>(\d+)<\\/strong>', content)[2])
    # Ceiling division replaces the original mod-check-plus-branch:
    # a full last page still counts as one page.
    return -(-wbSize // 45)


def parse(content, userId):
    """Extract weibo posts from a feed HTML fragment and publish each to MQ.

    Parameters:
        content: HTML string containing feed ``div`` elements.
        userId: id of the crawled user; stamped onto every published record.

    NOTE(review): the ouid filter is hard-coded, so only posts authored by
    account 1664677310 are collected regardless of userId -- confirm intended.
    """
    bsoup = BeautifulSoup(content, "html5lib")
    for div in bsoup.findAll("div", {"tbinfo": "ouid=1664677310"}):
        wbObj = {}
        wbObj['userId'] = userId
        wbObj['tag'] = "sina:wb"
        try:
            wbObj['mid'] = div['mid']
            wbObj['content'] = div.find("div", {"node-type": "feed_list_content"}).text.strip()
        except (KeyError, AttributeError):
            # Malformed entry (missing mid attribute or content div):
            # skip it instead of publishing a partial record, which the
            # original code did.
            print("parse exception exist")
            continue
        # unicode_escape turns the \uXXXX sequences json.dumps emits back
        # into readable characters before publishing.
        mqUtil.publish(json.dumps(wbObj).decode('unicode_escape'))
    
def getPageInfo(page, pageSize, userObj):
    """Fetch both lazy-loaded ("pagebar") feed fragments for one profile page.

    Each profile page loads its feed in chunks via the mbloglist ajax
    endpoint; pagebar=0 and pagebar=1 are the two scroll-triggered chunks.

    Parameters:
        page: 1-based page number to fetch.
        pageSize: total page count (from getWbSize).
        userObj: user dict with 'domain', 'pageId', 'nickname', 'userId'.

    NOTE(review): ``page < pageSize`` means the final page is never fetched
    -- confirm whether that off-by-one is intentional.
    """
    if page >= pageSize:
        return
    # The two requests were copy-pasted in the original (second one built by
    # str.replace on the query string); a loop keeps them in sync.
    for pagebar in (0, 1):
        ajaxurl = "ajwvr=6&domain=" + userObj["domain"] \
            + "&is_all=1&pagebar=" + str(pagebar) \
            + "&pl_name=Pl_Official_MyProfileFeed__24&id=" + str(userObj['pageId']) \
            + "&script_uri=/" + userObj["nickname"] \
            + "&feed_type=0&page=" + str(page) \
            + "&pre_page=" + str(page) \
            + "&domain_op=" + userObj['domain'] \
            + "&__rnd=" + str(int(time.time() * 1000))  # cache-buster timestamp
        r = session.get("http://weibo.com/p/aj/v6/mblog/mbloglist?" + ajaxurl)
        pg = json.loads(r.text)
        parse(pg.get("data"), userObj['userId'])

def getUserInfo(page):
    """Scrape one profile page: publish the user record, then crawl its feed.

    Parameters:
        page: 1-based page number of the profile timeline.

    Raises:
        IndexError: if an expected CONFIG value is missing from the page
            (same failure mode as the original per-field [0] lookups).

    NOTE(review): the profile path "/zziahui" is hard-coded -- confirm this
    crawler is meant to target a single fixed account.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0',
    }
    r = session.get("http://weibo.com/zziahui" + "?page=" + str(page), headers=headers)
    # The page embeds metadata as JS assignments: $CONFIG['key']='value'.
    # Table-driven extraction replaces ten copy-pasted findall lines; raw
    # strings fix the invalid "\[" escapes of the originals.
    configFields = [
        ('oid', r"CONFIG\['oid'\]='(\w+)'"),
        ('userId', r"CONFIG\['uid'\]='(\w+)'"),
        ('pageId', r"CONFIG\['page_id'\]='(\w+)'"),
        ('nickname', r"CONFIG\['onick'\]='(\w+)'"),
        ('sex', r"CONFIG\['sex'\]='(\w)'"),
        ('domain', r"CONFIG\['domain'\]='(\w+)'"),
        ('avatar', r"CONFIG\['avatar_large'\]='(.*)'"),
        ('location', r"CONFIG\['location'\]='(.*)'"),
        ('pid', r"CONFIG\['pid'\]='(.*)'"),
        ('title', r"CONFIG\['title_value'\]='(.*)'"),
    ]
    userObj = {'tag': "sina:user"}
    for key, pattern in configFields:
        userObj[key] = re.findall(pattern, r.text)[0]
    mqUtil.publish(json.dumps(userObj).decode('unicode_escape'))
    # The feed HTML for the page lives in the second-to-last <script>;
    # [8:-1] presumably strips a "FM.view(" prefix and trailing ")" around
    # the JSON payload -- confirm against a live page.
    g = BeautifulSoup(r.text, "html.parser")
    scripts = g.findAll("script")
    script = scripts[-2].text[8:-1]
    j = json.loads(script.strip())
    parse(j.get("html"), userObj['userId'])
    # Also fetch the lazy-loaded pagebar chunks for this page.
    getPageInfo(page, getWbSize(r.text), userObj)

# Crawl profile timeline pages 1 through 12. getUserInfo returns None, so
# the original's `userObj = getUserInfo(...)` assignment was dead and has
# been dropped.
for page in range(1, 13):
    getUserInfo(page)
