#coding=utf-8
import re, urllib, Internet, HTMLParser
from BeautifulSoup import BeautifulSoup
# Module-level result containers, reset by getDataList() on each call:
#   rets_host:   {host: concatenated "[...]" info strings}
#   rets_url:    {url: info string}
#   datapage:    raw decoded text accumulated by html_plain()
#   description: the page's <meta name="description"> content
rets_host, rets_url, datapage, description = {}, {}, "", ""

def processHost(list_datas):
    """Consume *list_datas* destructively and fill the global rets_host.

    Expected input shape: a header line "<digits>,<host>" followed by zero
    or more "[...]" info lines; all info lines for one host are concatenated
    and stored as rets_host[host].  Lines matching neither form are skipped.
    """
    host_pat = re.compile("^[0-9]+,(.*)$")
    info_pat = re.compile("^(\[.*\])$")

    def take():
        # Pop and strip the next line, or None when the list is exhausted.
        return list_datas.pop(0).strip() if list_datas else None

    line = take()
    while line is not None:
        header = host_pat.match(line)
        if header is None:
            # Not a host header -- skip and keep scanning.
            line = take()
            continue
        host = header.group(1).strip()
        pieces = []
        line = take()
        while line is not None:
            info = info_pat.match(line)
            if info is None:
                break
            pieces.append(info.group(1))
            line = take()
        rets_host[host] = "".join(pieces)

def processUrl(list_datas):
    """Parse the right-hand table column into the global rets_url dict.

    Recognized lines look like "<label>>scheme:rest" or
    "<label>>[info]scheme:rest".  When the previous non-matching line
    contained "..." / "…" (an elided run of similar URLs), checkadd()
    interpolates the URLs between the last one seen and the current one.
    """
    def checkadd(url, url_info, lastblank):
        # a = previously seen url (remembered as a function attribute so it
        # survives across calls), b = the current url.
        a, b = getattr(processUrl, "last_url") if hasattr(processUrl, "last_url") else "", url
        setattr(processUrl, "last_url", url)
        # NOTE: last_url must be recorded above even when we bail out here --
        # interpolation only happens across an explicit "..." gap.
        if not lastblank: return
        # Split a url into (prefix, numeric run, suffix).
        rex = "^(.*?)([0-9]+)([^0-9]*)$"
        if re.findall(rex, a) and re.findall(rex, b):
            x, y = re.findall(rex, a)[0], re.findall(rex, b)[0]
            if x[0]==y[0] and x[1].isalnum() and y[1].isalnum() and x[2]==y[2]:
                # Rebuild a zero-padded numeric template, e.g. "page%03d.htm".
                pts = x[0] + ("%0"+str(len(x[1]))+"d") + x[2]
                try:
                    begin, end = int(x[1])+1, int(y[1])
                    if end - begin < 500: # cap the expansion -- must not add too many
                        for i in range(begin, end):
                            rets_url[pts%i] = url_info + "[AutoIdentifyAdd]"
                except ValueError:
                    pass

    lastblank = False
    for line in list_datas: # parse line by line
        line = line.strip()
        if re.findall("^[a-zA-Z0-9 ]*>", line):
            line = line[line.find(">")+1:]
            if re.findall("^[a-zA-Z]+\:", line):
                # Bare "scheme:..." url with no info prefix.
                rets_url[line] = ""
                checkadd(line, "", lastblank)
                lastblank = False
            elif re.findall("^(\[.*\])([a-zA-Z]+\:.*)$", line):
                # "[info]scheme:..." -- split info from the url.
                url_info, url = re.findall("^(\[.*\])([a-zA-Z]+\:.*)$", line)[0]
                rets_url[url.strip()] = url_info
                checkadd(url.strip(), url_info, lastblank)
                lastblank = False
            else:
                lastblank = False
        else:
            # A non-url line containing an ellipsis marks an elided url range.
            lastblank = line.find("...") != -1 or line.find(u"…") != -1

def html_plain(text, rep):
    """Flatten an HTML fragment (utf-8 byte string) into plain-text lines.

    Side effect: appends the decoded fragment to the global datapage.
    The *rep* argument is accepted but unused (kept for interface parity).
    Returns a list of stripped lines with blank lines collapsed.
    """
    global datapage
    datapage += text.decode("utf8").strip() + "\n"

    # Snapshot every tag before rewriting so the leftovers can be stripped.
    all_tags = re.findall("<.*?>", text)

    # Physical newlines become spaces; line-breaking tags become newlines.
    text = text.replace("\n", " ")
    for breaker in ("<br>", "<br />", "<br/>", "<p>", "<p />", "<p/>", "</div>"):
        text = text.replace(breaker, "\n")

    # Delete all remaining tags outright.
    for tag in all_tags:
        text = text.replace(tag, "")

    text = text.decode("utf8").strip()
    # Undo the basic HTML entity escapes (cgi.escape's set plus &quot;);
    # &amp; goes last so it cannot create new entities.
    for entity, plain in (("&quot;", '"'), ("&gt;", ">"), ("&lt;", "<"), ("&amp;", "&")):
        text = text.replace(entity, plain)

    # Collapse runs of blank lines into single newlines.
    while text.find("\n\n") != -1:
        text = text.replace("\n\n", "\n")
    return text.strip().split("\n")

def analyzePageTD(left, right):
    """Feed one table row into the parsers: left cell holds hosts, right cell urls."""
    for cell, handler in ((left, processHost), (right, processUrl)):
        handler(html_plain("%s" % cell, "\n"))

def analyzepage(page):
    """Extract the description meta tag and parse every blog-entry table in *page*.

    Fills the module globals description / rets_host / rets_url (via the
    table parsers).  An IndexError here (missing meta tag) is deliberate:
    any exception means the page layout changed and must propagate.
    """
    global description
    description = re.findall(r"""<meta name="description" content="(.*?)" />""", page)[0]
    description = urllib.unquote(description).decode("utf8")
    # Each blog-entry body sits between these two div markers.
    beginx = '<div id="msgcns!A6B213403DBD59AF!.*?" class="bvMsg">'
    end = '<div class="footerLinks">'
    for begin in re.findall(beginx, page): # one iteration per matching entry div
        a = page.find(begin)
        b = page.find(end, a)
        if b == -1: continue

        body = page[a: b]
        try:
            body = urllib.unquote(body).decode("utf8")
        except Exception:
            pass  # best effort: keep the raw body when it is not valid utf-8
        try:
            tbodys = BeautifulSoup(body).findAll(name="tbody")
            for tbody in tbodys:
                for tr in tbody.findAll(name="tr"):
                    tds = tr.findAll(name="td")
                    # NOTE(review): assert used for layout validation -- stripped
                    # under python -O; the table is expected to have exactly two columns.
                    assert len(tds) == 2 # assert: table has exactly two columns
                    analyzePageTD(tds[0], tds[1])
        except HTMLParser.HTMLParseError:
            pass # malformed page markup -- skip this entry

# Any exception raised here means the page layout changed; let it propagate
# to the caller instead of masking it.
def getDataList(addr="http://safelab.spaces.live.com/?_c11_BlogPart_BlogPart=blogview&_c=BlogPart&partqs=cat%3d%25e6%25af%2592%25e7%25bd%2591%25e9%25a2%2584%25e8%25ad%25a6"):
    """Fetch and parse one safelab blog entry.

    Project home: http://code.google.com/p/safelabparser/

    addr -- an entry number (int), an "http://" URL, or a local file path.
    Returns (rets_host, rets_url, datapage, description); the module-level
    containers are reset on every call.
    """
    global rets_host, rets_url, datapage, description
    rets_host, rets_url, datapage, description = {}, {}, "", ""
    if isinstance(addr, int):
        site = 'http://safelab.spaces.live.com/blog/cns!A6B213403DBD59AF!%d.entry' % addr
        page = Internet.visit(site)
    elif addr.startswith("http://"):
        page = Internet.visit(addr)
    else:
        # Fix: context manager closes the handle promptly -- the old
        # file(addr).read() leaked the descriptor until GC.
        with open(addr) as f:
            page = f.read()

    analyzepage(page)
    return rets_host, rets_url, datapage.strip(), description.strip()

if __name__ == "__main__":
#    getDataList(862)
#    getDataList("http://safelab.spaces.live.com/blog/cns!A6B213403DBD59AF!872.entry")
    rets_host, rets_url, datapage, description = getDataList("http://safelab.spaces.live.com/blog/cns!A6B213403DBD59AF!951.entry")
#    print datapage
    print "==" * 50
    for x, y in rets_host.items():
        print x, "###", y
    print "--" * 50
    print "Len =", len(rets_host)
    print "==" * 50
    for x, y in rets_url.items():
        print x, "###", y
    print "--" * 50
    print "Len =", len(rets_url)
    print "==" * 50
