# coding=utf-8

import sys
import urllib2
import re
import db
from weibo_login import WeiboLogin

__author__ = 'weijinshi'

# Python 2 hack: reload() restores the setdefaultencoding attribute that
# site.py deletes, so implicit str<->unicode conversions use UTF-8
# instead of ASCII (the scraped pages and comments contain Chinese text).
reload(sys)
sys.setdefaultencoding('utf-8')


def auto_crawl(username, pwd, num):
    """Log in and crawl the follow relations of up to *num* users
    that the database has flagged for automatic crawling.

    The login user's own relations are fetched through the dedicated
    /myfollow path; everybody else goes through the generic crawler.

    Returns -1 when the Weibo login fails.
    """
    if not WeiboLogin(username, pwd).login():
        return -1
    own_uid = get_login_user_uid()
    for row in db.get_auto_crawl_users(num):
        target_uid = row[0]
        if target_uid == own_uid:
            get_self_weibo_relation(own_uid)
        else:
            get_weibo_relation(target_uid)


def crawl_by_uid(uid, username, pwd):
    """Log in and crawl the follow relations of one specific user.

    Dispatches to the self-crawler when *uid* is the login account
    itself, otherwise to the generic crawler.

    Returns -1 when the Weibo login fails.
    """
    if not WeiboLogin(username, pwd).login():
        return -1
    own_uid = get_login_user_uid()
    if uid != own_uid:
        get_weibo_relation(uid)
    else:
        get_self_weibo_relation(own_uid)


def get_login_user_uid():
    """Return the uid (string of digits) of the currently logged-in user.

    Weibo redirects http://weibo.com to the profile URL of the logged-in
    account; the uid is the numeric path segment of that redirect URL.

    Returns None when the redirect URL contains no /<digits>/ segment
    (the previous version crashed with AttributeError on that case, e.g.
    when the session cookie has expired and no redirect happens).
    """
    req = urllib2.Request('http://weibo.com')
    response = urllib2.urlopen(req)
    redirect_url = response.geturl()
    # Capture the digits directly instead of slicing '/123/' by hand.
    search_result = re.search(r'/(\d+)/', redirect_url)
    if search_result is None:
        return None
    return search_result.group(1)


def get_self_weibo_relation(self_uid):
    """Crawl the follow list of the logged-in user itself.

    Scrapes the /myfollow page (only available for one's own account),
    inserts the user and every followed account into the database, and
    finally refreshes the user's db_follows counter.

    self_uid: uid string of the logged-in user.
    """
    req = urllib2.Request(url='http://weibo.com/' + self_uid + '/myfollow')
    result = urllib2.urlopen(req)
    text = result.read().decode('utf-8')

    # Extract own nickname, total follow count and total fan count.
    match = re.compile(u'class="gn_name" target="_top" title="[\s\S]*?"')
    search_result = re.search(match, text)
    # [37:-1] strips the fixed attribute prefix and the trailing quote.
    self_nick = search_result.group(0)[37:-1].encode('utf-8')

    # Total follow count: the page shows u'全部关注(N)' ("all follows").
    match = re.compile(u'全部关注\(\d+\)')
    search_result = re.search(match, text)
    follows = search_result.group(0)[5:-1]

    # Total fan count: the page shows u'粉丝(N)' ("fans").
    match = re.compile(u'粉丝\(\d+\)')
    search_result = re.search(match, text)
    fans = search_result.group(0)[3:-1]

    # Insert the login user itself into the database.
    db.add_user(self_uid, self_nick, follows, fans)

    # Collect the 'uid=<digits>' fragments of the followed accounts;
    # {}.fromkeys(...).keys() de-duplicates (order is not preserved).
    match = re.compile(u'uid=\d+')
    find_result = re.findall(match, text)
    uid_list = {}.fromkeys(find_result).keys()

    page = 1
    # Keep paging while we have fewer uids than the reported follow total
    # and the current page still contains a u'下一页' ("next page") link.
    while len(uid_list) < int(follows):
        print page, len(uid_list)
        match = re.compile(u'下一页')
        find_result = re.findall(match, text)
        result = {}.fromkeys(find_result).keys()
        if len(result) > 0:
            page += 1
            req = urllib2.Request(url='http://weibo.com/' + self_uid + '/myfollow?t=1&page=' + str(page))
            result = urllib2.urlopen(req)
            text = result.read().decode('utf-8')
            match = re.compile(u'uid=\d+')
            find_result = re.findall(match, text)
            # NOTE(review): += may re-add uids already collected from earlier
            # pages; duplicates are only removed after the loop, so the
            # length check above can overcount — confirm paging behavior.
            uid_list += {}.fromkeys(find_result).keys()
        else:
            break

    print 'len(uid_list)=', len(uid_list)
    uid_list = get_real_uid_list(uid_list)
    uid_list = list(set(uid_list))  # remove duplicate elements
    if self_uid in uid_list:
        uid_list.remove(self_uid)
    print uid_list

    if len(uid_list) > 0:
        for uid in uid_list:
            db.add_relation(self_uid, uid)
            get_userinfo(uid)
    # Refresh db_follows with the relation count actually stored in the db.
    db_follows = db.count_db_follows(self_uid)
    db.update_user_db_follows(self_uid, db_follows)


def get_weibo_relation(uid):
    """Crawl the follow list of an arbitrary (non-login) user.

    Makes sure the user itself exists in the db first, then pages
    through http://weibo.com/<uid>/follow (hard cap: 11 pages)
    collecting followed uids, stores a relation row plus the followed
    user's info for each one, and finally refreshes db_follows.

    Returns 1 on success, -2 when the user is unknown or crawling failed.
    """
    if not db.is_user_exist(uid):
        get_userinfo(uid)
    user = db.query_user(uid)
    if user:
        try:
            # user[2]: presumably the total follow count (third value
            # written by db.add_user) — TODO confirm the db row layout.
            total_follows_num = user[2]
            req = urllib2.Request(url='http://weibo.com/' + uid + '/follow')
            result = urllib2.urlopen(req)
            text = result.read().decode('utf-8')
            # 'uid=<digits>' fragments; fromkeys().keys() de-duplicates.
            match = re.compile(u'uid=\d+')
            rawlv2 = re.findall(match, text)
            uid_list = {}.fromkeys(rawlv2).keys()

            page = 1
            print page, 'uid_list=', len(uid_list)
            # Page while fewer uids than the reported total and the page
            # still shows a u'下一页' ("next page") link.
            while len(uid_list) < total_follows_num:
                match = re.compile(u'下一页')
                rawlv2 = re.findall(match, text)
                result = {}.fromkeys(rawlv2).keys()
                if len(result) > 0:
                    # NOTE(review): on the first iteration this requests
                    # page=1, the page already fetched above — confirm
                    # whether page should advance before this request.
                    req = urllib2.Request(url='http://weibo.com/' + uid + '/follow?page=' + str(page))
                    result = urllib2.urlopen(req)
                    text = result.read().decode('utf-8')
                    match = re.compile(u'uid=\d+')
                    rawlv2 = re.findall(match, text)
                    uid_list += {}.fromkeys(rawlv2).keys()
                    # Drop the self-reference; raises ValueError (caught by
                    # the broad except below) when it is absent from the page.
                    uid_list.remove('uid=' + uid)
                page += 1
                print page, 'uid_list=', len(uid_list)
                if page > 11:
                    break

            print 'len(uid_list)=', len(uid_list)
            uid_list = get_real_uid_list(uid_list)
            if len(uid_list) > 0:
                for u in uid_list:
                    db.add_relation(uid, u)
                    get_userinfo(u)

            # Refresh db_follows with the count actually stored in the db.
            db_follows = db.count_db_follows(uid)
            db.update_user_db_follows(uid, db_follows)
            return 1
        except Exception, e:
            print '>>>[Error: get_weibo_relation]', uid, e
    return -2


def get_userinfo(uid):
    """Fetch nickname, follow count and fan count for a user and store them.

    Scrapes http://weibo.com/<uid>/follow. No-op when the user already
    exists in the db. Returns True when the user was added, False
    otherwise (already present, parse failure, or non-numeric fan count).
    """
    if not db.is_user_exist(uid):
        print uid
        req = urllib2.Request(url='http://weibo.com/' + uid + '/follow', )
        result = urllib2.urlopen(req)
        try:
            text = result.read().decode('utf-8')
            # Nickname comes from the page title u'<title>NICK的微博';
            # [7:-3] strips '<title>' and the trailing u'的微博'.
            match = re.compile(u'<title>[\s\S]*?的微博')
            search_result = re.search(match, text)
            nick = search_result.group(0)[7:-3].encode('utf-8')

            # Grab total follow and fan counts. The matched fragment looks
            # like: <strong node-type="follow">455</strong>...<span>关注
            # ... <strong node-type="fans">760</strong>...<span>粉丝
            # so after splitting on '>' the counts sit at indexes 1 and 9.
            match = re.compile(u'<strong[\s\S]*?>粉丝')
            search_result = re.search(match, text)
            result = search_result.group(0).encode('utf-8').split('>')
            follows = result[1].split('<')[0]
            fans = result[9].split('<')[0]
            if re.match(r'\d+', fans):
                db.add_user(uid, nick, follows, fans)
                return True
            else:
                # Some pages match but yield garbage; skip the db insert.
                print '>>>[Error: get_userinfo]', uid, nick, follows, fans
        except Exception, e:
            # Enterprise-account pages are laid out differently and fail here.
            print '>>>[Error: get_userinfo]', uid, e
    return False


def get_real_uid_list(uid_list):
    """Strip the leading 'uid=' prefix from every entry, in place.

    Returns the same list object so callers can reassign conveniently.
    """
    for index, raw_uid in enumerate(uid_list):
        uid_list[index] = raw_uid[4:]
    return uid_list


if __name__ == '__main__':
    # Fill in real Weibo credentials before running; with empty strings
    # the login fails and auto_crawl returns -1 without crawling.
    username = ''
    pwd = ''
    auto_crawl(username, pwd, 20)