import sgmllib
import urllib2
import re
import os
import time
import random
from urllib2 import HTTPError

# Local mirror root: the directory the crawler was launched from.
# NOTE(review): relies on $PWD being exported (POSIX shells) — confirm.
ROOTPATH = os.environ['PWD']
ROOTPAGE = 'http://www.verycd.com'
# Site section being crawled; also reused as the on-disk subdirectory.
CLASSIFICATION = '/archives/movie/'
# Safety valve: abort once this many HTTP requests have been made.
MAXIMUMCONNECTION = 999000000
ccount = 0  # running count of HTTP requests (incremented in parsePage)

DEBUG = 1  # 1 enables progress logging
finedebug = 0  # 1 additionally prints every topic URL before fetching

def parsePage(url, file_path, file_name, parser):
    """Fetch `url`, mirror the raw HTML to file_path/file_name, and return
    the page's hyperlinks.

    Retries transient failures forever (with a back-off delay); gives up
    only on HTTP 404.  Returns None when the page could not be fetched,
    otherwise the list from parser.get_hyperlinks().
    """
    s = None
    if DEBUG == 1:
        print('url = %s' % url)
    # Randomized politeness delay so we do not hammer the server.
    time.sleep(random.randint(3, 8))

    global ccount
    if ccount > MAXIMUMCONNECTION:
        print('exit for protection')
        exit()
    while True:
        try:
            # Fetch the page.
            if DEBUG == 1:
                print('connecting')
            web = urllib2.urlopen(url, timeout=30)
            ccount += 1
            try:
                s = web.read()
            finally:
                web.close()  # release the connection even if read() raises
            if DEBUG == 1:
                print('success')

            # Mirror the raw HTML on local disk.
            if not os.path.exists(file_path):
                os.makedirs(file_path)
            fi = open(file_path + '/' + file_name, 'w')
            try:
                fi.write(s)
            finally:
                fi.close()  # do not leak the handle if write() fails
            break
        except HTTPError as e:
            print(e)
            if e.code == 404:
                break  # page is gone for good; stop retrying
            # Back off before retrying; the original spun in a busy loop here.
            time.sleep(5)
        except IOError as e:
            print(e)
            time.sleep(5)
        except Exception as e:
            print(e)
            # Back off on unexpected errors too, instead of busy-looping.
            time.sleep(5)

    # Parse the fetched page and hand back its hyperlinks.
    if s is None:
        return None
    parser.reset()
    parser.parse(s)
    return parser.get_hyperlinks()
    

class MyParser(sgmllib.SGMLParser):
    """Collects hyperlinks (and their link text) from an HTML page."""

    def __init__(self, verbose=0):
        "Initialise the parser, forwarding 'verbose' to the superclass."
        sgmllib.SGMLParser.__init__(self, verbose)
        self.hyperlinks = []       # every href seen, in document order
        self.descriptions = {}     # href -> text found inside its <a> element
        self.inside_a_element = 0  # truthy between <a href=...> and </a>
        self.starting_description = 0

    def parse(self, s):
        "Run the parser over the HTML string 's'."
        self.feed(s)
        self.close()

    def start_a(self, attributes):
        "Record the href of an opening <a> tag."
        for name, value in attributes:
            if name != "href":
                continue
            self.hyperlinks.append(value)
            self.inside_a_element = 1
            self.value = value

    def end_a(self):
        "Note that the current <a> element has closed."
        self.inside_a_element = 0

    def handle_data(self, data):
        "Remember the text of the link we are currently inside, if any."
        if not self.inside_a_element:
            return
        self.descriptions[self.value] = data
        self.starting_description = 0

    def get_hyperlinks(self):
        "Return every href collected so far."
        return self.hyperlinks

    def get_descriptions(self):
        "Return the href -> link-text mapping collected so far."
        return self.descriptions

    def reset(self):
        "Clear collected state so the parser can be reused."
        sgmllib.SGMLParser.reset(self)
        self.hyperlinks = []
        self.descriptions = {}


if __name__ == '__main__':

    myparser = MyParser()

    # Entry point of the crawl: the movie-archive index page.
    url = ROOTPAGE + CLASSIFICATION
    file_path = ROOTPATH + CLASSIFICATION 
    extension = '.html'
    
    # get hyperlinks from root web
    links = parsePage(url, file_path, 'root'+extension, myparser)
    
    page_descriptions = {}
    pages = []


    # find pages, use pages[] and page_descriptions{} to keep page hyperlins
    # and descriptions
    regular_page = re.compile('/archives/movie/\d+.html')

    for i in links:
        r = regular_page.match(i)
        if r:
            if DEBUG == 1:
                print 'found a archive page: %s'%i
            s = r.group()
            pages.append(s)
            # NOTE(review): raises KeyError if the matching <a> had no
            # text content (handle_data never ran for it) — confirm.
            page_descriptions[s] = myparser.get_descriptions()[s]

    # skip the pages for already done
    # NOTE(review): hard-coded resume offset — presumably the first 76
    # archive pages were fetched by a previous run; verify before reuse.
    pages= pages[76:]

    for i in pages:
        # The page's link text doubles as its directory/file name on disk.
        file = page_descriptions[i]
        SUBPATH = file

        links = parsePage(ROOTPAGE+i,\
                          file_path + SUBPATH,\
                          file+extension,\
                          myparser)
        topics = [] 
        topic_descriptions = {}

        # find topics and store them by topics[]and topic_descriptions{} 
        regular_topic = re.compile('/topics/\d+/')        
        for j in links:
            r = regular_topic.match(j)
            if r:
                if DEBUG == 1:
                    print 'found a topic: %s'%j
                s = r.group()
                topics.append(s)
                topic_descriptions[s] = myparser.get_descriptions()[s]

        regular_id = re.compile('\d+')

        for j in topics:
                # The numeric topic id becomes the on-disk directory name.
                description = topic_descriptions[j]
                dir = regular_id.search(j).group()

                if finedebug == 1:
                    print ROOTPAGE+j
                # save the webpages of each topic on local disk
                temp = parsePage(ROOTPAGE+j,\
                          file_path+SUBPATH + '/' + dir,\
                          'topic'+extension,\
                          myparser)
                if temp == None:
                    # Fetch failed for good (e.g. 404): skip this topic.
                    continue
                '''
                    save the page views
                    j ='/topic/2811927/'
                '''
                page_view = 'http://stat.verycd.com/counters/folder/'
                temp = j.split('/')  # j is '/topics/<id>/', so temp[2] is the id
                if DEBUG == 1:
                    print page_view + temp[2]
                # Retry fetching the page-view counter until it succeeds.
                while(True):
                    try:
                        stat = urllib2.urlopen(page_view +temp[2], timeout=30)
                        break
                    except IOError, e:
                        print e
                        print 'going to reconnect after 5 sec'
                        time.sleep(5)
                    except Exception, e:
                        print e
                # Retry the disk write until it succeeds.
                # NOTE(review): if the first write fails, the retry calls
                # .read() on the already-read str (stat was rebound) — an
                # AttributeError here is NOT caught by the IOError handler.
                while(True):
                    try:
                        file = open(file_path + SUBPATH +'/' + dir +\
                                   '/pageview.html','w')
                        stat = stat.read()
                        file.write(stat)
                        file.close()
                        break
                    except IOError, e:
                        print e
                        time.sleep(5)




                '''
                    save the name of topic
                '''
                try:
                    namefile = open(file_path+SUBPATH +'/'+dir+\
                                    '/name.txt', 'w')
                    namefile.write(description)
                    namefile.close()
                except IOError, e:
                    print e

                # First comments page; its links reveal the total page count.
                links = parsePage(ROOTPAGE+j+'comments/',\
                                 file_path+SUBPATH + '/' +dir\
                                  + '/' + 'comments/',\
                                 'page1'+extension,\
                                 myparser)
                # find largest page number of comments
                regular_comment = re.compile(\
                            'http://www.verycd.com/topics/\d+/comments/page\d+')
                max = 1
                for k in links:
                    if k == None:
                        continue
                    r = regular_comment.match(k)
                    if r:
                        if DEBUG == 1:
                            print 'found a comment page: %s' %k 
                        s = r.group()
                        # Everything after the literal 'page' is the number.
                        s = s[s.rfind('page')+4:]
                        #largely depends on the link formate
                        if int(s) > max:
                            max = int(s)
                if DEBUG == 1:
                    print 'max = %d'%max
                # base on the max, store the comments on local disk
                if max != 1:
                    # NOTE(review): range starts at 1, so page1 is fetched a
                    # second time and page1.html overwritten — confirm intent.
                    for k in range(1,max+1):
                        parsePage(ROOTPAGE + j + 'comments/page' + str(k),\
                                 file_path+SUBPATH + '/' + dir\
                                  + '/' + 'comments/',\
                                'page' + str(k) + extension,\
                                myparser)
    #print "work is done"



