'''
neaten_files
Files of weibo are disordered.
Such as:
    1234567890
    1234567890_friend1
    1234567890_friend2
    1234567890_follow1
    1234567890_follow2
We should neaten them as follows:
    1234567890
    1234567890_friend2 rolling over and dump into 1234567890_friend
    1234567890_friend1 rolling over and dump into 1234567890_friend
    ...
Because the order in which one user became another user's friend should be preserved
(it is useful for prediction).
'''
import json
import os
import re
import sys

# NOTE(review): 'Beautiful' is not a name exported by BeautifulSoup (the class
# is 'BeautifulSoup') and it is unused below — kept pending confirmed removal.
from BeautifulSoup import Beautiful

def getHtmls(dir_name, f):
    """Return the list of html fragments embedded in the page file dir_name+f.

    Weibo page files wrap their content as
    ``STK && STK.pageletM && STK.pageletM.view({...json...})``;
    each JSON blob that passes filterJson contributes its 'html' value.
    """
    # Raw string so the backslash escapes reach the regex engine unmangled.
    regex = re.compile(r'STK && STK\.pageletM && STK\.pageletM\.view\((.*)\)')
    file_obj = open(dir_name + f, 'r')
    try:
        page = file_obj.read()
    finally:
        # Close even if read() raises (original leaked on error).
        file_obj.close()
    htmls = []
    for mat in regex.finditer(page):
        # BUG FIX: stdlib json replaces 'simplejson', which was never imported.
        json_obj = json.loads(mat.group(1))
        # BUG FIX: original called undefined 'filerJson' (typo).
        if filterJson(json_obj):
            htmls.append(json_obj['html'])
    return htmls


def filterJson(json_obj):
    """Return True if json_obj carries a usable 'html' field, else False.

    BUG FIX: the original only ever returned False or fell through to None
    (falsy), so every JSON blob was discarded by the caller.  It also
    raised KeyError when 'html' was absent, which is exactly the case the
    filter is meant to handle; use .get() to treat a missing key as None.
    """
    return json_obj.get('html') is not None

def mergeFile(dir_name, uid, flag, ls):
    """Merge the numbered page files uid+flag+N into one file uid+flag.

    Pages are collected for N = 1, 2, ... while such a file exists in ls,
    then written newest-first (highest N first) per the module docstring.

    dir_name -- directory prefix (expected to end with '/')
    uid      -- 10-character user id
    flag     -- '_friend' or '_follow'
    ls       -- listing of dir_name (from os.listdir), used as existence test
    """
    pages = []
    i = 1
    while True:
        file_f = uid + flag + str(i)
        if file_f not in ls:
            break
        # BUG FIX: original referenced undefined 'file_friend'.
        pages.append(getHtmls(dir_name, file_f))
        i += 1
    file_obj = open(dir_name + uid + flag, 'w')
    try:
        # BUG FIX: list.reverse() returns None, so the original
        # 'for i in htmls.reverse()' raised TypeError; it also tried to
        # write whole lists instead of the html strings inside them.
        for page in reversed(pages):
            for html in page:
                file_obj.write(html)
    finally:
        file_obj.close()

                
def walkDir(dir_name):
    """For each uid listed in the 'uuids' file, merge its page files.

    Reads 10-character uids (one per line) from the local file 'uuids';
    for every uid that has a directory entry in dir_name, merges its
    '_friend' and '_follow' page series.
    """
    try:
        ls = os.listdir(dir_name)
    except OSError:
        # BUG FIX: narrowed the bare 'except' to the error listdir raises;
        # print() form works under both Python 2 and 3.
        print('access deny')
        return
    file_obj = open('uuids', 'r')
    try:
        for line in file_obj:
            uid = line[:10]
            if uid in ls:
                mergeFile(dir_name, uid, '_friend', ls)
                mergeFile(dir_name, uid, '_follow', ls)
    finally:
        file_obj.close()


# BUG FIX: the entry-point guard must compare __name__ (the module's name),
# not the undefined identifier __main__, which raised NameError on import.
if __name__ == "__main__":
    walkDir('pages/')
