#!/usr/bin/env python
#-*- coding: UTF-8 -*-

#-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-#
# This file is part of lz.
#
# lz is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# lz is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with lz.  If not, see <http://www.gnu.org/licenses/>.
#-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-o0o-#

# Parses a single page, eliminating posts written by anyone other
# than lz (the thread's original poster).
#
class LzFilter():
    """Filters one forum page, keeping only the content posted by lz
    (the thread's original poster).

    Usage:
        f = LzFilter()
        f.ParsePage(first_page, 1)   # first page: also detects lz's name
        text = f.LzContent()         # lz-only content of that page
        url = f.NextPageUrl()        # "" when there is no next page
    """

    def __init__(self, verbose=0):
        self.verbose = verbose

        self.lz = ""             # name of the original poster
        self.lz_articles = ""    # lz-only content of the last parsed page
        self.next_page_url = ""  # url of the next page, "" when none

        # markers around the first post and its author's name
        self.first_post_anchor = '<TABLE id=\"firstAuthor\" align=center border=0 cellSpacing=0 width=\'100%\'><TR>'
        self.first_author_anchor = '&idwriter=0&key=0 target=_blank>'
        self.first_author_end = '</a>&'

        # markers around every following post and its author's name
        self.post_anchor = '<TABLE cellspacing=0 border=0 bgcolor=f5f9fa width=100% ><TR><TD WIDTH=100 ALIGN=RIGHT VALIGN=bottom></TD><TD><font size=-1 color=green><br><center>作者：<a href=\"/browse/Listwriter.asp?vwriter='
        self.author_end_anchor = '&idwriter=0&key=0\"   target=_blank>'

        # markers around the "next page" (下一页) link
        self.next_page_anchor = '<a style=text-decoration:underline; href='
        self.next_page_end = '>下一页</a>]'

    def SetLz(self, lz):
        """Manually set the original poster's name."""
        self.lz = lz
        if self.verbose:
            print("lz is set as %s" % lz)

    def GetLz(self):
        """Return the original poster's name detected (or set) so far."""
        return self.lz

    def ParsePage(self, page, is_first_page=0):
        """Parse one page and collect lz's posts into self.lz_articles.

        page          -- the page's html, utf-8 encoded
        is_first_page -- 1 for the thread's first page: the page heading is
                         kept and the first post's author becomes self.lz
        """
        # clear the content collected from any previously parsed page
        self.lz_articles = ""

        # XXX does not work yet
        # find the "next page" link: locate its tail marker first, then
        # search backwards for the opening anchor that precedes it
        next_page_end = page.find(self.next_page_end)
        if self.verbose:
            print("find next_page_end in %d" % next_page_end)

        if next_page_end > 0:
            next_page_idx = page.rfind(self.next_page_anchor, 0, next_page_end) + len(self.next_page_anchor)
            self.next_page_url = page[next_page_idx:next_page_end]
            if self.verbose:
                print("next page is %s" % self.next_page_url)
        else:
            self.next_page_url = ""
            if self.verbose:
                print("next page is EMPTY")

        last_idx = page.find(self.first_post_anchor)
        if self.verbose:
            print("1st post found in %d" % last_idx)

        # get the 1st author
        author_idx = page.find(self.first_author_anchor, last_idx) + len(self.first_author_anchor)
        author_end = page.find(self.first_author_end, author_idx)
        first_author = page[author_idx:author_end]
        if self.verbose:
            print("first_author %s found from %d to %d" % (first_author, author_idx, author_end))

        # get the heading & lz's name, only done once
        if is_first_page == 1:
            self.lz = first_author
            self.lz_articles += page[:last_idx]
            if self.verbose:
                print("parsing the 1st page, let's add the heading")
                print(page[:last_idx])
                print("lz is %s" % self.lz)

        # get the end of the 1st post, aka the beginning of the next post
        idx = page.find(self.post_anchor, author_end)
        if self.verbose:
            print("end of the 1st post is at %d" % idx)
        if first_author == self.lz:
            if idx == -1:
                self.lz_articles += page[last_idx:]
                if self.verbose:
                    print("reach the end of current page")
                return
            else:
                self.lz_articles += page[last_idx:idx]
                if self.verbose:
                    print("append the 1st post" + page[last_idx:idx])

        # parse the following posts and retrieve lz's articles;
        # by_lz records whether the post starting at last_idx was written
        # by lz, i.e. whether to append it once its end is known
        by_lz = 0
        last_idx = idx
        # BUGFIX: the original code hardcoded by_lz = 0 here and only ever
        # read the authors of the anchors found *after* idx, so the first
        # reply post was always dropped even when lz wrote it; read its
        # author up front instead
        if idx != -1:
            author_idx = idx + len(self.post_anchor)
            author_end = page.find(self.author_end_anchor, author_idx)
            author = page[author_idx:author_end]
            if author == self.lz:
                by_lz = 1
            if self.verbose:
                print("the post at %d is written by %s" % (idx, author))
        while idx < len(page):
            idx = page.find(self.post_anchor, last_idx + 1)
            if idx == -1:
                break

            # the previous post was written by lz: append it now
            if by_lz == 1:
                self.lz_articles += page[last_idx:idx]
                if self.verbose:
                    print("lz article found from %d to %d" % (last_idx, idx))
                    print("lz is %s author is %s" % (self.lz, author))
                    print(page[last_idx:idx])

            if idx >= 0:
                if self.verbose:
                    print("another post found in %d" % idx)

                author_idx = idx + len(self.post_anchor)
                author_end = page.find(self.author_end_anchor, author_idx)
                author = page[author_idx:author_end]

                if author == self.lz:
                    by_lz = 1
                    if self.verbose:
                        print("and it's written by %s" % self.lz)
                else:
                    by_lz = 0
                    if self.verbose:
                        print("but it's written by %s" % author)

            last_idx = idx

        # the last post on the page is written by lz: append the tail
        if by_lz == 1:
            self.lz_articles += page[last_idx:]
            if self.verbose:
                # BUGFIX: the original line was a bare tuple expression
                # (a no-op) because it was missing the print
                print("the last post is also written by %s" % self.lz)

    def LzContent(self):
        """Return the lz-only content collected by the last ParsePage call."""
        return self.lz_articles

    def NextPageUrl(self):
        """Return the next page's url from the last ParsePage call ("" when none)."""
        return self.next_page_url


#to parse a single page and retrieve following pages
#XXX currently this class is not in use
#
import sgmllib

class PageManager(sgmllib.SGMLParser):
    """SGML-parser based scanner for the "next page" (下一页) link.

    Feed a page through parse(); next_page_url() then returns the href of
    the anchor whose text is 下一页, or "" when the page has no such link.
    """

    def __init__(self, verbose=0):
        sgmllib.SGMLParser.__init__(self, verbose)
        self.verbose = verbose
        self.in_table = 0          # inside a <table> element
        self.in_td = 0             # inside a <td> element
        self.in_a = 0              # inside an <a> element
        self.in_pageDiv = 0        # inside the <td id="pageDivTop"> pager cell
        self.page_url = ""         # href of the most recent candidate anchor
        self.next_page_found = 0   # set once the 下一页 anchor is identified

    def parse(self, s):
        """Feed the whole page string to the parser and finish."""
        self.feed(s)
        self.close()

    def start_table(self, attributes):
        self.in_table = 1
        if self.verbose:
            print("start table")

    def end_table(self):
        self.in_table = 0
        if self.verbose:
            print("end table")

    def start_td(self, attributes):
        self.in_td = 1
        if self.verbose:
            print("start td")

        for name, value in attributes:
            if name == "id" and value == "pageDivTop":
                self.in_pageDiv = 1
                if self.verbose:
                    print("in pageDivTop")

    def end_td(self):
        self.in_td = 0
        self.in_pageDiv = 0

    def start_a(self, attributes):
        self.in_a = 1
        # BUGFIX: once the 下一页 anchor was identified, later anchors must
        # not overwrite its href; the original kept updating page_url, so
        # next_page_url() returned the last href on the page instead
        if self.next_page_found:
            return
        for name, value in attributes:
            if name == "href":
                self.page_url = value
                if self.verbose:
                    print("found an url %s" % self.page_url)

    def end_a(self):
        self.in_a = 0

    def handle_data(self, data):
        # the anchor text 下一页 ("next page") marks the pager link; pages
        # are processed as utf-8 byte strings, hence the encode
        if self.in_pageDiv and self.in_a and data == u"下一页".encode('utf-8'):
            self.next_page_found = 1
            if self.verbose:
                print("!!!!!!found next page url!!!!!!")

    def next_page_url(self):
        """Return the href of the 下一页 anchor, or "" when not found."""
        if self.next_page_found:
            return self.page_url
        else:
            return ""

# execution starts

# to get the 1st page's content and analyze
import urllib
import sys
import getopt

def main():
    """Parse the command line, fetch every page of the thread starting at
    the given url, and append the lz-only content to the output file.

    Pages arrive gb18030-encoded; they are transcoded to utf-8 for
    parsing and back to gb18030 when written out.
    """
    # process the command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ho:v", ["help", "output="])
    except getopt.GetoptError as err:
        # "as err" works on Python 2.6+ and 3; the old "except E, err"
        # form is a syntax error on Python 3
        # print help information and exit:
        print(str(err))
        usage()
        sys.exit(2)
    output = "out.html"  # the default output file
    verbose = False
    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-o", "--output"):
            output = a
        else:
            assert False, "unhandled option"
    if len(args) != 1:
        usage()
        sys.exit(2)
    else:
        addr = args[0]  # get the input webpage url

    page = urllib.urlopen(addr)
    page_content = page.read().decode('gb18030').encode('utf-8')

    lzFilter = LzFilter(verbose)

    # parse the 1st page (this also detects lz's name)
    lzFilter.ParsePage(page_content, 1)
    lzContent = lzFilter.LzContent()

    # output the retrieved content to a file; close it even if a later
    # fetch or parse raises (the original leaked the handle on error)
    fout = open(output, "a")
    try:
        fout.write(lzContent.decode('utf-8').encode('gb18030'))

        # continue to fetch the following pages and parse them
        addr = lzFilter.NextPageUrl()
        while addr != "":
            print("start to retrieve another page: %s" % addr)
            page = urllib.urlopen(addr)
            page_content = page.read().decode('gb18030').encode('utf-8')
            lzFilter.ParsePage(page_content)
            lzContent = lzFilter.LzContent()
            fout.write(lzContent.decode('utf-8').encode('gb18030'))
            addr = lzFilter.NextPageUrl()
    finally:
        fout.close()

def usage():
    """Print the command-line help text for this script."""
    help_lines = (
        "Usage: lz.py [options] url",
        "Options:",
        "-h, --help           Display this message",
        "-o, --output <file>  Place the output into <file>",
        "-v                   Display the verbose messages",
    )
    print("\n".join(help_lines))

# really execute the main function, but only when run as a script;
# the guard keeps "import lz" free of side effects
if __name__ == "__main__":
    main()
