import urllib2
import re
import copy
import sys
# Crawl depth: number of frontier-swap rounds performed by main().
level = 5
# Seed URL: Weibo's directory of verified accounts.
root = 'http://verified.weibo.com'
visited_link = set()  # URLs already fetched (pageVisit adds each link after reading it)
tobe_link = set()     # current crawl frontier, seeded with root below
fresh_link = set()    # links discovered during the current round (swapped with tobe_link)
uid_set = set()       # in-memory uid store; only read by writeDown(), which is unused by default
tobe_link.add(root)

def writeDown():
    """Dump the collected uid set to stdout (debug helper)."""
    sys.stdout.write('%s\n' % (uid_set,))


def linkReshape(matched):
    """Normalize raw 'href=...' regex matches into absolute URLs, in place.

    For each match: strips quotes, backslashes and the 'href=' prefix;
    prefixes relative paths with the crawl root; and drops bare profile
    links ('http://weibo.com/u/...'), which carry no new uids to mine.

    Mutates `matched` in place (callers rely on this) and returns None.
    """
    # Original code removed items from the list while iterating over
    # range(len(matched)), which skips elements and raises IndexError once
    # the list shrinks; it also compared url[19:] (the tail) against the
    # 19-char prefix, so the filter never matched. Build a fresh list and
    # assign back through a slice to keep the in-place contract.
    cleaned = []
    for raw in matched:
        url = raw.replace('"', '').replace('\\', '').replace('href=', '')
        if url.find('http') == -1:
            # Relative link: anchor it to the crawl root.
            url = '/'.join([root, url])
        if url[:19] == 'http://weibo.com/u/':
            continue
        cleaned.append(url)
    matched[:] = cleaned



def pageVisit(link, new_list, visited_list, uid_file):
    try:
        content = urllib2.urlopen(link).read()
    except:
        sys.stderr.write(link)
        return
    regex = re.compile('href=\S*')
    matched = regex.findall(content)
    linkReshape(matched)
    for mat in matched:
        if mat not in visited_list:
            new_list.add(mat)
    visited_list.add(link)
    regex = re.compile('uid=[0-9]{10,10}')
    matched = regex.findall(content)
    print '%d:%d'%(len(visited_list), len(matched))
    for mat in regex.findall(content):
        # uid_set.add(mat)
        uid_file.write(mat[-10:]+'\n')


def linkVisit(link_list, new_list, visited_list, uid_file):
    """Visit every not-yet-seen link in link_list, collecting discoveries.

    Clears new_list first, then lets pageVisit refill it with fresh links
    while appending uids to uid_file. Does nothing when link_list is empty.
    """
    if not link_list:
        return
    new_list.clear()
    for candidate in link_list:
        if candidate in visited_list:
            continue
        # Fetch the page; pageVisit updates new_list/visited_list itself.
        pageVisit(candidate, new_list, visited_list, uid_file)


def main():
    """Run the crawl: `level` rounds of frontier swaps, streaming uids to 'uids'.

    Each round drains one frontier set into the other (tobe_link <->
    fresh_link) so discovery from round N feeds round N+1.
    """
    # `with` guarantees the uid file is closed even if a round raises;
    # the original open()/close() pair leaked the handle on error.
    with open('uids', 'w') as uid_file:
        for layer in range(level):
            linkVisit(tobe_link, fresh_link, visited_link, uid_file)
            linkVisit(fresh_link, tobe_link, visited_link, uid_file)


# Entry-point guard: start the crawl only when run as a script, not on import.
if __name__ == "__main__":
    main()
