#!/usr/bin/env python
# -*- coding: utf8 -*-
import xml.dom.minidom
import urllib, mediawiki

class NewPages:
    """Fetch and parse the Special:NewPages feed from a MediaWiki site."""

    def __init__(self, index):
        """Log in via the mediawiki helper and remember the index.php URL.

        index -- base URL of the wiki's index.php entry point,
                 e.g. 'http://zh.wikipedia.org/w/index.php'.
        """
        self.site = mediawiki.login()
        self.index = index

    def _retriveXML(self, feed, limit, hidebots, hidepatrolled, offset):
        """Request the new-pages feed and return the raw XML response body."""
        params = {'feed': feed,
                  'namespace': 0,
                  'limit': limit,
                  'title': 'Special:最新页面',
                  'hidebots': 1 if hidebots else 0,
                  'hidepatrolled': 1 if hidepatrolled else 0,
                  }
        # Bug fix: a None offset used to be serialized as the literal query
        # string "offset=None"; omit the parameter entirely when unset.
        if offset is not None:
            params['offset'] = offset
        url = self.index + '?' + urllib.urlencode(params)
        return self.site._request(url = url)

    def _parseXML(self, XMLData):
        """Parse feed XML into a list of dicts, one per <item> element.

        Each dict maps a child node's tag name to its first text child's
        content ('' when the element has no leading text node).
        Whitespace-only text values (formatting between elements) are skipped.
        """
        ret = []
        document = xml.dom.minidom.parseString(XMLData)
        for item in document.getElementsByTagName('item'):
            data = {}
            for node in item.childNodes:
                value = ''
                children = node.childNodes
                if children.length > 0 and children[0].nodeType == node.TEXT_NODE:
                    value = children[0].data
                # Drop whitespace-only values, but keep empty strings so
                # every child element still appears as a key.
                if value and not value.strip():
                    continue
                data[node.nodeName] = value
            ret.append(data)
        return ret

    def retrive(self, feed = 'rss', limit = 9, hidebots = False, hidepatrolled = False, offset = None):
        """Retrieve the list of new pages as dicts of feed-item fields.

        feed          -- feed format, 'rss' or 'atom'.
        hidebots      -- when True, exclude bot-created pages.
        hidepatrolled -- when True, exclude already-patrolled pages.
        offset        -- optional paging offset; omitted from the request
                         when None.
        Returns a list of dicts, one per feed <item>.
        """
        XMLData = self._retriveXML(feed, limit, hidebots, hidepatrolled, offset)
        return self._parseXML(XMLData)

def test():
    """Smoke test: fetch unpatrolled new pages from Chinese Wikipedia."""
    pages = NewPages('http://zh.wikipedia.org/w/index.php')
    pages.retrive(hidepatrolled = True)

# Run the smoke test when executed as a script.
if __name__ == '__main__':
    test()