import HTMLParser
import urllib, urllib2
import httplib
import re
import random
import urlparse

# 13thfloor.at related: which Linux-VServer branches count as stable vs
# experimental, and where the experimental patches are published
STABLE = ('2.2', '2.0')
EXPERIMENTAL = ('2.3',)
VSPATCHESURL='http://vserver.13thfloor.at/Experimental/'

# linux-vserver.org wiki related: the template page holding the patch
# matrix, plus the MediaWiki form-field names used when posting an edit
TEMPLATEPAGE = 'http://linux-vserver.org/index.php?title=Template:ExperimentalPatchTableMatrix&action=edit'
SUMMARYFIELD = 'wpSummary' # set to the name of the summary field
EDITFIELD = 'wpTextbox1'   # set to the name of the textarea edit field
SUBMITBUTTON = 'wpSave'    # set to the name of the submit button
SUMMARY = 'Updated automatically by autotab script'
# uncomment following fields (and update username and password)
# if you want to authenticate before updating the Template
LOGINURL = 'http://linux-vserver.org/index.php?title=Special:Userlogin'
LOGOUTURL = 'http://linux-vserver.org/index.php?title=Special:Userlogout'
LOGINFORM = 'userlogin'
SUBMITLOGIN = 'wpLoginattempt'
USERFIELD = 'wpName'
PASSFIELD = 'wpPassword'
#USERNAME = ''
#PASSWORD = ''

# people.linux-vserver.org/~harry/ related: grsecurity-combined patches
GRPATCHESURL='http://people.linux-vserver.org/~harry/'

class Stripper(HTMLParser.HTMLParser):
    """Collect patch file names from the text nodes of a directory
    listing page.

    Each hit is appended to self.meat as [groups, data], where data is
    the raw text (the patch file name) and groups are the regex
    captures: (full kernel version) (kernel release) (kernel line)
    (full vserver version) (full vserver-only version) (vserver line).
    """
    # compiled once at class-creation time instead of on every data event
    _VERS = re.compile(r'patch-(((\d+\.\d+)\.\d+)(?:\.\d+)*)-(vs((\d+\.\d+)(?:\.\d+)*(?:-(?:pre|rc)\d+)?)(?:-grsec\d+(?:\.\d+)*-\d+)?)')

    def __init__(self):
        self.meat = []  # list of [capture-groups tuple, raw patch name]
        HTMLParser.HTMLParser.__init__(self)

    def handle_data(self, data):
        # match() anchors at the start, so only text beginning with
        # 'patch-' is collected; scan once and reuse the match object
        # (the old code re-scanned with findall for the same groups)
        m = self._VERS.match(data)
        if m:
            self.meat.append([m.groups(), data])

class FormStripper(HTMLParser.HTMLParser):
    """Harvest the pieces of an HTML form needed to re-submit it.

    After feed(): self.hidden / self.submit map input names to values
    for <input type='hidden'> and <input type='submit'>; self.text and
    self.textarea collect the names of <input type='text'> and
    <textarea> fields; self.form holds the attributes of the last
    <form> tag seen (unset if no form was parsed).
    """
    def __init__(self):
        self.hidden = {}
        self.submit = {}
        self.textarea = []
        self.text = []
        HTMLParser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        # (name, value) pairs of the hidden and submit inputs are needed
        # to rebuild the POST; textarea/text names identify the edit and
        # (optional) summary fields; the <form> tag carries the action.
        attrmap = dict(attrs)
        if tag == 'form':
            self.form = attrmap
        elif tag == 'textarea':
            self.textarea.append(attrmap['name'])
        elif tag == 'input':
            kind = attrmap.get('type')
            if kind == 'hidden':
                self.hidden[attrmap['name']] = attrmap['value']
            elif kind == 'submit':
                self.submit[attrmap['name']] = attrmap['value']
            elif kind == 'text':
                self.text.append(attrmap['name'])

class LoginParser(HTMLParser.HTMLParser):
    """Pull the login form's action URL and the submit button's value
    out of the Special:Userlogin page.

    After feed(): self.action is the action attribute of the form named
    LOGINFORM, and self.submit is the value of the SUBMITLOGIN button;
    either stays None if not found.
    """
    def __init__(self):
        self.action = self.submit = None
        HTMLParser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        attrmap = dict(attrs)
        if tag == 'form' and attrmap.get('name') == LOGINFORM:
            self.action = attrmap['action']
        elif tag == 'input' and attrmap.get('type') == 'submit' \
                and attrmap['name'] == SUBMITLOGIN:
            self.submit = attrmap['value']

def login():
    """Log into the wiki and return a cookie-carrying opener.

    Returns None when any of the required configuration globals
    (USERNAME/PASSWORD stay commented out by default) is undefined.
    The opener is also installed globally via install_opener so later
    urllib2 calls reuse the session cookies.
    """
    required = set(('USERNAME', 'PASSWORD', 'LOGINURL', 'LOGINFORM',
                    'SUBMITLOGIN', 'USERFIELD', 'PASSFIELD'))
    # all names must be defined for a successful login; compare against
    # the required set itself rather than a hard-coded count so editing
    # the list above cannot silently break the check
    if required - set(globals().keys()):
        return None

    lp = LoginParser()
    lp.feed(urllib2.urlopen(LOGINURL).read()) # TODO: move url fetching into
                                              # separate function
    params = { USERFIELD: USERNAME,
               PASSFIELD: PASSWORD,
               SUBMITLOGIN: lp.submit,
             }

    # cookie-aware opener so the session survives across requests
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
    urllib2.install_opener(opener)

    # the form action is site-relative; rebuild an absolute URL from
    # the login page's host
    host = urlparse.urlsplit(LOGINURL)[1]
    req = urllib2.Request("http://" + host + lp.action,
                          urllib.urlencode(params))
    f = opener.open(req)
    f.read()
    f.close()

    return opener

def sort_by_int(arg):
    """Sort key turning a dotted version string into a list of ints,
    so '2.10' sorts after '2.9'.

    Non-string arguments yield None (they sort before any list under
    Python 2's mixed-type ordering, matching the old behaviour).
    Uses EAFP instead of the Python-2-only `basestring` check.
    """
    try:
        parts = arg.split('.')
    except AttributeError:
        return None
    return [int(p) for p in parts]

def latest(meat, depth=4):
    """Return the highest vserver version found in *meat*, truncated to
    at most *depth* numeric components.

    Each meat entry is [groups, data]; groups[3] is the full vserver
    version string (e.g. 'vs2.3.0.34').

    Bug fix: the old code sorted the raw findall() result *lists*, so
    the numeric sort key always returned None and the function silently
    returned the last page entry instead of the highest version.
    """
    if depth < 1:
        raise ValueError('depth must be >= 1')
    if depth == 1:
        cutter = re.compile(r'\d+')
    else:
        cutter = re.compile(r'\d+(?:\.\d+){1,%d}' % (depth - 1))
    # leading version substring of each entry's full vserver version
    versions = [cutter.search(entry[0][3]).group(0) for entry in meat]
    # numeric, not lexicographic, ordering: '2.10' > '2.9'
    return sorted(versions, key=lambda v: [int(p) for p in v.split('.')])[-1]
    
# For every kernel release keep only the first matching entry seen
# (the latest vserver release, given suitably ordered input), keyed by
# the full kernel version.  A set gives O(1) membership tests for the
# releases already handled.
def build_dictionary(meat, limits=EXPERIMENTAL):
    """Map full kernel version -> meat entry, keeping one entry per
    kernel release whose vserver-only version starts with *limits*
    (a string or tuple of prefixes).
    """
    handled = set()
    result = {}
    for entry in meat:
        # filter on the vserver-only version (index 4); we should
        # compare the full vs version string here, and drop (vs line)
        # in meat
        if not entry[0][4].startswith(limits):
            continue
        release = entry[0][1]
        if release in handled:
            continue
        handled.add(release)
        result[entry[0][0]] = entry
    return result

o = login()  # cookie-carrying opener when credentials are configured, else None

# TODO: add some error handling (but it's planned along with switch to urllib2)
# Scrape the experimental patch directory, collecting patch file names.
page = urllib.urlopen(VSPATCHESURL)
stripper=Stripper()
stripper.feed(page.read())
# highest vserver version found on the page, e.g. '2.3.0.34'
latest_vs_ver = latest(stripper.meat)
# only entries whose vserver version starts with the latest version
# survive; reversed() so later page entries are seen first by
# build_dictionary -- presumably the newest; confirm the page ordering
vs_dict = build_dictionary(reversed(stripper.meat), latest_vs_ver)

# same scrape for the grsecurity-combined patches, reusing the parser
gr_page = urllib.urlopen(GRPATCHESURL)
stripper.reset()
stripper.meat = []  # reset() does not clear our own accumulator
stripper.feed(gr_page.read())
gr_dict = build_dictionary(reversed(stripper.meat), latest_vs_ver)

# every kernel version that has at least one of the two patch flavours
all_keys = set(vs_dict) | set(gr_dict)

# Build the wiki-markup table, row by row: kernel tarball link, the
# plain vserver patch link, the vserver+grsecurity patch link.
table_experimental = [
'{| class="wikitable" style="margin: 2em auto 2em auto;"',
'! Linux-VServer branch',
'Linux kernel',
'!class="devel"| 2.3',
'Experimental',
'! 2.3 + grsecurity',
'Experimental' ]

# we can use sorted(plist) or reversed(plist) here to sort by kernel version,
# and not by latest patches
for item in sorted(all_keys, reverse=True, key=sort_by_int):
    table_experimental.append("|-")
    # every key comes from at least one of the two dicts, so `kernel`
    # is always assigned before it is used below
    # (renamed the loop temp from `vars`, which shadowed the builtin)
    try:
        entry = vs_dict[item]
    except KeyError:
        vs_patch = "|"  # empty cell
    else:
        vs_patch = "| [" + VSPATCHESURL + entry[1] + " " + entry[0][3] + "]"
        kernel = [entry[0][2], entry[0][0]]  # [kernel line, full version]
    try:
        entry = gr_dict[item]
    except KeyError:
        gr_patch = "|"  # empty cell
    else:
        gr_patch = "| [" + GRPATCHESURL + entry[1] + " " + entry[0][3] + "]"
        kernel = [entry[0][2], entry[0][0]]
    table_experimental.extend(["| [http://www.kernel.org/pub/linux/kernel/v" + kernel[0] + "/linux-" + kernel[1] + ".tar.bz2 " + kernel[1] + "]", vs_patch, gr_patch])

table_experimental.append("|}")

# Fetch the template's edit page and post the new table back.
wiki = urllib2.urlopen(TEMPLATEPAGE).read()

# there are two forms on the page, we have to choose the right one by hand here
# that's because HTMLParser chokes on the Googe's JS on the page
form = re.findall(r'<form.*?form>', wiki, re.S)[0]

inputs = FormStripper()
inputs.feed(form)

# we have a form, now we need to build the multipart/form-data body by
# hand; 29 random digits make the boundary unlikely to collide with
# the payload
BOUNDARYHDR='---------------------------'
BOUNDARYHDR+=''.join([ random.choice('0123456789') for i in range(29) ])
BOUNDARY='--' + BOUNDARYHDR

body = []
# hidden fields, carried over verbatim so the wiki accepts the edit
for field in inputs.hidden:
    body.append(BOUNDARY)
    body.extend(['Content-Disposition: form-data; name="%s"' % field, ''])
    body.append(inputs.hidden[field])

# submit button
body.append(BOUNDARY)
body.extend(['Content-Disposition: form-data; name="%s"' % SUBMITBUTTON, ''])
body.append(inputs.submit[SUBMITBUTTON])

# summary field (this is optional)
body.append(BOUNDARY)
body.extend(['Content-Disposition: form-data; name="%s"' % SUMMARYFIELD, ''])
body.append(SUMMARY)

# body field: the table rows become the textarea content, joined (like
# everything else) with CRLF when the request is built below
body.append(BOUNDARY)
body.extend(['Content-Disposition: form-data; name="%s"' % EDITFIELD, ''])
body.extend(table_experimental)

# last boundary
body.append(BOUNDARY + '--')

content_type = 'multipart/form-data; boundary=' + BOUNDARYHDR

# (dropped an unused `headers` list here -- the Content-Type header is
# attached via req.add_header below)

# the form action is site-relative; rebuild an absolute URL
host = urlparse.urlsplit(TEMPLATEPAGE)[1]

req = urllib2.Request("http://" + host + inputs.form['action'], '\r\n'.join(body))
req.add_header('Content-Type', content_type)

# post through the authenticated opener when we logged in
if o:
    f = o.open(req)
else:
    f = urllib2.urlopen(req)

f.read()
f.close()

# log out if logged in
if o:
    req = urllib2.Request(LOGOUTURL)
    f = o.open(req)
    f.read()
    f.close()

