#!/usr/bin/env python
#########################################################################
## Application:	    KernelCheck
## File:	    ./KernelPageParser.py
## Author:	    Scott 'deepspring' Smith <contact.sjsmith@gmail.com>
## Last Edit:	    12/04/2009
## Version:	    0.0.2
##
## Purpose:
## ^^^^^^^^
## The purpose of this class is to strip unwanted HTML code from kernel
## fragment pages collected from kernel.org's servers.
## 
## It collects link data and raw text data for processing.
##
## ======================================================================
## Revision Information
## ======================================================================
##
## 12/04/2009 : SS : 0.0.1 
##	-> Initial File Creation
##
## 12/04/2009 : SS : 0.0.2
##      -> Added get_patch_pkg(filter)
##      -> Added get_patch_version()
##      -> Added get_patch_release()
##
#########################################################################
##

## Imports
import re
import urllib2
import HTMLParser

## Define KernelPageParser Class
## - Extends HTMLParser module
class KernelPageParser(HTMLParser.HTMLParser):

    ## Class variables
    LINK_DATA = []	# Container list for Links
    RAW_DATA = []	# Container list for raw web page data (stripped of HTML code)

    ## HTML start tag handler
    ## Overrides HTMLParser.HTMLParser.handle_starttag()
    ## This is setup for capturing anchor (<a href="">) tags
    ## - returns nothing
    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for name, value in attrs:
                if name == 'href':
                    self.LINK_DATA.append(value)

    ## Raw text data handler
    ## Overrides HTMLParser.HTMLParser.handle_data()
    ## Used to capture raw web site data without the HTML code
    ## - returns nothing
    def handle_data(self, text):
        ## make sure we aren't adding useless \n characters
        if text != "\n":
            self.RAW_DATA.append(text)

    ## Global cleanup method
    ## - returns nothing
    def cleanup(self):
        ## Clear LINK_DATA list
        del self.LINK_DATA[:]
        ## Clear RAW_DATA list
        del self.RAW_DATA[:]
    
    ## Get link items method
    ## - returns a list
    def get_link_items(self):
        return self.LINK_DATA
    
    ## Get raw data method
    ## - returns a list
    def get_raw_data(self):
        return self.RAW_DATA

    ## Get patch/kernel package
    ## requires filter: patch, prepatch, mmpatch
    ## - Returns a string
    def get_patch_pkg(self, filter="patch"):
        # Stable, normal performance patch
        if filter == "patch":
            # Grab the kernel.org download page for kernels and patches
            html = urllib2.urlopen("http://kernel.org/pub/linux/kernel/v2.6/")
            html = html.read()

            # Fetch kernels list
            kernels = re.findall('<a href="linux-(.*).tar.bz2">', html)

            # Fetch patches list
            patches = re.findall('<a href="patch-(.*).bz2">', html)

            # Reverse the kernels list
            kernels.reverse()

            # Reverse the patches list
            patches.reverse()
            
            # Sift through patches to find 'release' patches
            rel_patches = []
            for p in patches:
	            if re.match(r'^\d+[\.]\d+[\.]\d+[\.]\d+$', p):
		            rel_patches.append(p)
		            
		    base_kernels = []
            for k in kernels:
	            if re.match(r'^\d+[\.]\d+[\.]\d+$', k):
		            base_kernels.append(k)
		            
            if base_kernels[0] == rel_patches[0]:
                return base_kernels[0]
            else:
                return rel_patches[0]

        # Development patch
        if filter == "prepatch":
            print "Finding prepatch"
            # Grab the kernel.org download page for kernels and patches
            html = urllib2.urlopen("http://kernel.org/pub/linux/kernel/v2.6/testing/")
            html = html.read()
            
            # Fetch kernels list
            kernels = re.findall('<a href="linux-(.*).tar.bz2">', html)

            # Reverse the kernels list
            kernels.reverse()
            
            # Sift through patches to find 'release' patches
            prepatches = []
            for p in kernels:
	            if re.match(r'\d[\.]\d[\.]\d*[^?\:]*$', p):
		            prepatches.append(p)
		    if len(prepatches) > 0:
		        return prepatches[0]
		    else:
		        return None

        # Adrew Mortons development patch
        #if filter == "mmpatch":
        #    ## Find the url for the mm patch
        #    for item in self.LINK_DATA:
        #        ## Match on the link that ends in bz2
        #        if re.match(r'^/pub/linux/kernel/people/akpm/patches/.*?bz2$', item):
        #            return item

    ## Get patch version information
    ## - returns string
    def get_patch_version(self):
        #for item in self.RAW_DATA:
        #    ## Match any of X.X.X, X.X.X.X, X.X.X-*
        #    if re.match('^\d[\.]\d[\.]\d*[^?\:]*$', item):
        #        return item
        return

    ## No longer works
    ## Get patch release date
    ## - returns string
    def get_patch_release(self):
    #    for item in self.RAW_DATA:
    #        ## Match (UTC)
    #        if re.match('^.*?(UTC)$', item):
    #            return item
         return


## End of File
## ======================================================================
