# -*- coding: UTF-8 -*-
import cookielib
import requests
from bs4 import BeautifulSoup
import re
import json
from rabbitmq.MsgUtil import MsgUtil
from time import sleep
import urllib
from psina.action.log4psina import PSinaLogin

class SearchWb:
       
    def __init__(self):
        self.session = requests.session()
        self.msgUtil = MsgUtil()
        self.headers = {}
        self.headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"
        self.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
        self.headers['Accept-Encoding'] = 'gzip, deflate, sdch'
        self.headers['Accept-Language'] = 'zh-CN,zh;q=0.8'
        self.headers['Upgrade-Insecure-Requests'] = "1"
    
    def parseText(self, content, keyword):
        jsoup = BeautifulSoup(content, "html5lib")
        for div in jsoup.findAll("div" , {"class":"list_person"}):
            try:
                obj = {}
                obj['keyword'] = keyword
                obj["task"] = "sina:search:user"
                obj['mainpage'] = div.find("div", {"class" :"person_pic"}).a['href']
                obj['nickname'] = div.find("div", {"class" :"person_pic"}).a['title']
                obj['avatar'] = div.find("div", {"class" :"person_pic"}).img['src']
                obj['userId'] = div.find("div", {"class" :"person_pic"}).img['uid']
                mainpage = div.find("a", {"class": "W_linkb"}).text
                mainpageId = re.findall(r'(http://weibo.com/u/|http://weibo.com/)(\w+)', mainpage)[0][1]
                obj['pageid'] = mainpageId
                obj['domain'] = mainpage
                obj['followstr'] = div.find("div", {"class" : "person_adbtn"}).a['action-data']
                obj['sex'] = re.findall(r'title=\"(男|女)\"', div.__str__())[0]
                label = div.find("p", {"class" : "person_label"}).text
                label = re.sub("\\n", "", label).strip()
                label = re.sub("(\\t)", "", label)
                obj['tag'] = label
                print obj
                self.msgUtil.publish(json.dumps(obj).decode('unicode_escape'))
            except:
                pass
        
    def searchUser(self, keyword, page = 1):
        try:
            url = "http://s.weibo.com/user/" + urllib.quote_plus(keyword) + "&page=" + str(page)
            self.session.cookies = cookielib.LWPCookieJar()
            self.session.cookies.load("../cookies/logincookies", ignore_discard = True, ignore_expires = False)
            self.headers['refer'] = "http://s.weibo.com/user/" + urllib.quote_plus(keyword) + "&page=" + str(page - 1)
            r = self.session.get(url, headers=self.headers)
            if r.status_code != 200:
                login = PSinaLogin("1002318371@163.com", "yanghui");
                login.doLogin()
                self.searchUser(keyword, page)
            self.session.close()
            p = re.findall(r'({\"pid\":\"pl_user_feedList\".*)\)</script>', r.content)[0]
            jsonp = json.loads(p)
            userinfo = jsonp.get("html")
            self.parseText(userinfo, keyword)
            if page == 1:
                pagediv = BeautifulSoup(userinfo, "html5lib").find("div", {"class": "layer_menu_list W_scroll"})
                if pagediv != None and pagediv.ul != None:
                    lis = pagediv.ul.children;
                    for i in range(1,len(list(lis))):
                        self.searchUser(keyword, i+1)
                        sleep(5)
        except:
            print "search user occur exception"
            

    def __de__(self):
        self.session.close()
# Script entry point. Guarded so that importing this module (e.g. from a
# scheduler or another crawler) no longer kicks off a live scrape as a side
# effect; running the file directly behaves exactly as before.
if __name__ == "__main__":
    searchWB = SearchWb()
    searchWB.searchUser("职业技术学院")