from urllib import urlopen
#from urlparse import urlparse
from HTMLParser import HTMLParser
from myLists import UniqueList
import sys
import os
import re

class MyParser(HTMLParser):
    """Parser for a uio.no course page.

    While the page is fed in, collects hrefs to other course pages found
    in the prerequisites ("forkunnskaper") and overlap ("overlapp")
    sections, and flags pages that carry a discontinued-course status box.
    """
    def __init__(self):
        self.prerequisites = False      # currently inside the prerequisites section
        self.overlap = False            # currently inside the overlap section
        self.inUndervisning = False     # currently inside the "Undervises:" text run
        self.invalid = False            # course is no longer available
        self.linkedPages = list()       # course URLs linked as prerequisites
        self.overlapPages = list()      # course URLs listed as overlapping
        HTMLParser.__init__(self)
    
    def handle_starttag(self, tag, attr):
        # Section boundaries are marked by <h2> tags: an <h2> whose first
        # attribute mentions the section name opens it, and the NEXT <h2>
        # (whatever its attributes) closes it again.
        if len(attr) != 0:
            if tag == 'h2':
                if 'forkunnskaper' in attr[0]: # first time
                    self.prerequisites = True
                elif self.prerequisites == True: # second time
                    self.prerequisites = False

                if 'overlapp' in attr[0]: # first time
                    self.overlap = True
                elif self.overlap == True: # second time
                    self.overlap = False

                # NOTE(review): an <h2 ... "statusmessage"> appears to be the
                # "course discontinued" banner — confirm against a live page.
                if len(attr[0]) == 2 and attr[0][1] == 'statusmessage':
                    self.invalid = True

        # While inside either section, collect every <a href> that points
        # to another course page.
        if self.prerequisites == True:
            if len(attr) != 0 and tag == 'a':
                a = attr[0][1]
                assert attr[0][0] == 'href', attr
                if a.startswith('http://www.uio.no/studier/emner/'):
                    self.linkedPages.append(a)
        if self.overlap == True:
            if len(attr) != 0 and tag == 'a':
                a = attr[0][1]
                assert attr[0][0] == 'href', attr
                if a.startswith('http://www.uio.no/studier/emner/'):
                    self.overlapPages.append(a)

    def handle_data(self, data):
        # The "Undervises:" text run also lists overlapping courses, so the
        # overlap flag is reused here; a '.' in the data ends the run.
        if 'Undervises:' in data:
            self.overlap = True
            self.inUndervisning = True
        elif self.inUndervisning == True and '.' in data:
            self.overlap = False
            self.inUndervisning = False
            
class Graphviz:
    """Incrementally writes a Graphviz .dot file of course dependencies.

    Calling the instance like a function appends raw dot text to the
    output file; the helper methods emit edges and cluster subgraphs.
    """

    def __init__(self, filename):
        """Open *filename* for writing and emit the digraph header."""
        self.setFilename(filename)

    def setFilename(self, filename):
        """(Re)direct output to *filename*, writing a fresh header.

        Fix: closes any previously opened file first, so re-targeting the
        output (the -o command-line flag) no longer leaks a file handle.
        """
        old = getattr(self, 'f', None)
        if old is not None:
            old.close()
        self.filename = filename
        self.f = open(filename, 'w')
        self('digraph G {concentrate=true\n')

    def __call__(self, s):
        """Append the raw dot snippet *s* to the output file.

        Fix: the original closed and reopened the file in append mode on
        every write to force data to disk; flush() gives the same
        durability without the per-write churn.
        """
        self.f.write(s)
        self.f.flush()

    def linkedPages(self, f, t):
        """Emit one edge per element of *t*, each pointing at *f*.

        f: the depending course, t: iterable of its prerequisites.
        """
        for item in t:
            self('\t"{1}" -> "{0}";\n'.format(f, item))

    def overlap(self, f, t):
        """Group course *f* and the overlapping courses *t* in a cluster."""
        # '-' is not valid in a dot identifier, hence the replace.
        self('\tsubgraph cluster_{0} {{\n'.format(f.replace('-', '_')))
        self('\t\t"{0}";\n'.format(f))
        for item in t:
            self('\t\t"{0}";\n'.format(item))
        self('\t}\n')

    def close(self):
        """Terminate the graph, close the file and render a PDF via dot."""
        self('}\n')
        self.f.close()
        print('Written {0}'.format(self.filename))
        c = "dot -Tpdf {0}.dot -o {0}.pdf".format(self.filename.replace('.dot', ''))
        print('>> {0}'.format(c))
        print(os.system(c))
        
def strip(url):
    """Reduce a course URL to the bare course identifier.

    For a string, returns the last meaningful path component (skipping a
    trailing slash or an index.xml segment).  For any non-string iterable
    of URLs, returns a list with each element stripped recursively.
    """
    if not hasattr(url, 'split'):
        return [strip(u) for u in url]
    parts = url.split('/')
    tail = parts[-1]
    if tail == '' or tail.endswith('.xml'):
        return parts[-2]
    return tail

def read(website):
    print 'Visiting', website
    page = urlopen(website)
    encoding = page.headers.getparam('charset')
    if encoding != None:
        s = page.read().decode(encoding)
    else:
        print 'WARNING, could not read', website
        s = ''
    page.close()

    p = MyParser()
    p.feed(s)
    p.close()

    return p

def findDependencies(website):
    """Parse one course page and emit its graph fragments.

    Draws arrows for linked (prerequisite) courses, a cluster box for
    overlapping courses, and colors discontinued courses red.  Returns
    the parsed MyParser instance so the caller can keep traversing.
    """
    parsed = read(website)

    # arrows: prerequisite links
    graphviz.linkedPages(strip(website), strip(parsed.linkedPages))

    overlaps = parsed.overlapPages
    if overlaps:
        # boxed: overlapping courses share a cluster
        graphviz.overlap(strip(website), strip(overlaps))
        for page in overlaps:
            # red: overlapping course that has been discontinued
            if read(page).invalid:
                graphviz('\t"{0}" [color=red];\n'.format(strip(page)))

    # red: this course itself has been discontinued
    if parsed.invalid:
        graphviz('\t"{0}" [color=red];\n'.format(strip(website)))

    return parsed

# Course identifiers (strip()-ed URLs) already processed by recursion().
visitedPages = list()
# Module-level sink that all graph output is written through.
graphviz = Graphviz('out.dot')

def recursion(website, level=0):
    """Depth-first traversal of prerequisite links starting at *website*.

    *level* counts the pages visited so far and is threaded through the
    recursive calls; once 100 pages have been processed the traversal
    stops with a warning.  Returns the updated counter.

    Fix: the original never incremented *level* anywhere, so the guard
    below was dead code and termination relied solely on the
    visitedPages dedup.
    """
    if level >= 100:
        print('WARNING: max recurrsion reached at {0} {1}'.format(level, strip(website)))
        return level

    key = strip(website)
    if key not in visitedPages:
        page = findDependencies(website)
        visitedPages.append(key)
        level += 1  # count this visit so the limit above is reachable
        for linked in page.linkedPages:
            level = recursion(linked, level)

    return level
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('usage:python {0} website'.format(sys.argv[0]))
        exit(1)

    skipNext = False
    for ix in range(1, len(sys.argv)):
        if sys.argv[ix] == '-o':
            # -o FILE: redirect graph output
            graphviz.setFilename(sys.argv[ix+1])
            skipNext = True
        elif skipNext:
            # this argument was the -o filename, already consumed
            skipNext = False
        else:
            website = sys.argv[ix]
            if not website.startswith('http://'):
                # Bare course code: map the faculty prefix onto the
                # department URL it lives under.  First match wins.
                common = 'http://www.uio.no/studier/emner/'
                if re.match(r'INF.*\d\d\d\d', website) is not None:
                    website = common + 'matnat/ifi/' + website
                elif re.match(r'FYS.*\d\d\d\d', website) is not None or re.match(r'UNIK.*\d\d\d\d', website) is not None:
                    website = common + 'matnat/fys/' + website
                elif re.match(r'BIO.*\d\d\d\d', website) is not None:
                    website = common + 'matnat/biologi/' + website
                elif re.match(r'MBV.*\d\d\d\d', website) is not None:
                    website = common + 'matnat/molbio/' + website
                elif re.match(r'KJM.*\d\d\d\d', website) is not None:
                    website = common + 'matnat/kjemi/' + website
                elif re.match(r'MAT(-\w\w\w)?\d\d\d\d', website) is not None:
                    website = common + 'matnat/math/' + website
                # Fix: the old pattern 'EX(PHIL)|(FAC).*' bound the
                # alternation wrongly — it matched anything starting with
                # "FAC" and never matched "EXFAC...".
                elif re.match(r'EX((PHIL)|(FAC)).*', website) is not None:
                    website = common + 'hf/ifikk/' + website
                else:
                    print('Sorry, not recognized {0}'.format(website))
                    exit(1)
            # command-line courses are the roots of the graph (house shape)
            graphviz('\t"{0}" [shape=house];\n'.format(strip(website)))
            recursion(website)

    graphviz.close()
