#!/usr/bin/env python
#-*- coding: utf8 -*-
"""
Essential library for wiki
"""
import json, os, re, time, urllib2
from datetime import datetime
from dateutil.relativedelta import relativedelta as time_diff
from xplor import _Surfer
from functions import capitalize, encode, decode, is_ip, quote, wikiquote
from core.optimizers import fmt, printf, File
from sitematrix import *

# Known language codes, loaded once at import time from a cached log file
# (path is relative to the current working directory — TODO confirm caller cwd).
languages = File("languages.log").get()
# MediaWiki API actions this library knows about.
actions = (
	'sitematrix', 'opensearch', 'query', 'expandtemplates', 'parse', 'feedwatchlist',
	'help', 'paraminfo', 'purge', 'rollback', 'delete', 'undelete', 'protect', 'block',
	'unblock', 'move', 'edit', 'upload', 'emailuser', 'watch', 'patrol', 'import',
	'userrights'
)
# Subset of actions that must be sent as HTTP POST requests.
post_actions = (
	'login', 'purge', 'rollback', 'delete', 'undelete', 'protect', 'block',
	'unblock', 'move', 'edit', 'upload', 'emailuser', 'import', 'userrights'
)

class NotSamePage(Exception):
	"""Raised when the two revids given are not for the same page.

	Derives from Exception (like NoSuchSite/NoSuchPage) so that the
	``raise NotSamePage`` in Page.revisions() actually works; the
	original classic class could not be raised reliably.
	"""

class NoSuchSite(Exception):
	"""Raised when a requested wiki site does not exist or cannot be resolved."""

class NoSuchPage(Exception):
	"""Raised when a requested page does not exist on the target site."""

class Time:
	def __init__(self, obj=None):
		if isinstance(obj, basestring):
			if obj[10] == "T" and obj[-1]=="Z":
				self.obj = datetime.strptime(obj, "%Y-%m-%dT%H:%M:%SZ")
			elif "/" in obj:
				self.obj = datetime.strptime(obj, "%H:%M:%S %d/%m/%Y")
			elif "-" in obj:
				self.obj = datetime.strptime(obj, "%Y-%m-%d %H:%M:%S")
		elif isinstance(obj, tuple):
			self.obj = datetime(*obj)
		elif isinstance(obj, float):
			self.obj = datetime.fromtimestamp(obj)
		elif obj != None:
			self.obj = obj
		else:
			self.obj = datetime.today()-time_diff(hours=2)

	def __call__(self):
		return self.obj

	def add(self, n, c="h"):
		if c == "Y":
			return self.obj + time_diff(years=n)
		elif c == "M":
			return self.obj + time_diff(months=n)
		elif c == "D":
			return self.obj + time_diff(days=n)
		elif c == "h":
			return self.obj + time_diff(hours=n)
		elif c == "m":
			return self.obj + time_diff(minutes=n)
		elif c == "s":
			return self.obj + time_diff(seconds=n)

	def subtract(self, quantity, unity="h"):
		if unity == "Y":
			return self.obj - time_diff(years=quantity)
		elif unity == "M":
			return self.obj - time_diff(months=quantity)
		elif unity == "D":
			return self.obj - time_diff(days=quantity)
		elif unity == "h":
			return self.obj - time_diff(hours=quantity)
		elif unity == "m":
			return self.obj - time_diff(minutes=quantity)
		elif unity == "s":
			return self.obj - time_diff(seconds=quantity)

	def yesterday(self):
		self.obj = datetime.today() + time_diff(days=-1)
		return self.to_api()

	def to_api(self):
		return self.obj.strftime("%Y-%m-%dT%H:%M:%SZ")

	def to_api_oclock(self):
		return self.obj.strftime("%Y-%m-%dT00:00:00Z")

	def to_simple_string(self):
		return self.obj.strftime("%Y-%m-%d %H:%M:%S")

	def to_string(self):
		return self.obj.strftime("%H:%M:%S %d/%m/%Y")

	def to_abredged_string(self):
		return self.obj.strftime("%H:%M %d-%m-%y")

	def to_hour_string(self):
		return self.obj.strftime("%H:%M:%S")

	def to_date_string(self):
		return self.obj.strftime("%Y-%m-%d")

	def to_short_local_string(self):
		return self.obj.strftime("%d %b %y, %H:%M")

class WikiLink:
	def __init__(self, string, lang=None, fam=None, is_user=False, error="skip"):
		self._string = string
		m = re.search(
			ur"(?P<pref>([\w\-:]*[\\dchjlLnrR]??|l:[\w\-]{2,})?)(?P<begin>\{\{\{?|\[\[)"
			ur"(?P<link>[^\{\}\|]+?)(?:\|(?P<params>[^\}]+?))?(?P<end>\}\}\}?|\]\])",
			self._string
		)
		if m:
			groups = m.groupdict()
			self._is_link = groups['begin'] == "[[" and groups['end'] == "]]"
			self._is_template = groups['begin'] == "{{" and groups['end'] == "}}"
			self._link = groups['link']
			self._params = groups['params']
			self._pref = groups['pref']
		else:
			self._is_link = self._is_template = self._params = self._pref = None
			self._link = self._string.split("|")[0]
			self._link = self._link.replace("::","")

		#if ":" in self._link: self._link = self._link.split(":")
		self._lang = lang or "meta"
		self._fam = fam or "wikimedia"
		self._sitename = "meta.wikimedia.org"
		self._site = Site(self._lang, self._fam)
		self._ns = None
		self._title = None
		self._pagename = None # pagename = namespace name + : + title
		self._url = ""
		self._is_user = is_user
		self._query = None
		self._section = None
		self._error = error
		self.slice()

	def site(self):
		return self._site

	def sitename(self):
		return self._sitename

	def abredged_sitename(self):
		return self.site().abredged_sitename()

	def family(self):
		return self._fam

	def language(self):
		return self._lang

	def title(self):
		return self._title

	def namespace(self):
		return self._ns

	def url(self):
		m = re.search(u"(https?://[^.]+\.([^.]+)\.org/wiki/)(.+)", self._url)
		if not m: return self._url
		path  = m.group(1)
		fam  = m.group(2)
		title = m.group(3)
		ns=""
		elements=title.split(":", 1)
		ns_test = elements[0].replace("_"," ")
		if ns_test in _sites[self._sitename].namespaces():
			ns = elements[0]
			ns = re.sub("[Mm]edia[Ww]iki", "MediaWiki", ns, 1)
			ns = ns+":"
			title=elements[1]
		if fam != "wiktionary":
			title = title[0].upper() + title[1:]
		return path+ns+title

	def normalize_title(self):
		ns=""
		ns_test = self._title.replace("_"," ").lower()
		if ns_test in _sites[self._sitename].namespaces():
			self._title = re.sub("[Mm]edia[Ww]iki", "MediaWiki", self._title, 1)
		if self.family() != "wiktionary":
			self._title = self._title[0].upper() + self._title[1:]
		#self._title = wikiquote(self._title)
		return self._title

	def clean_link(self):
		if self._is_template:
			el="DEFAULTSORT:"
			if el in self._link: self._link = self._link.replace(el,"")

		if not self._is_template or self._is_template and not self._params:
			#%26 &; %3D =
			self._query= "?%s" % quote(self._link.split("?")[1]) if "?" in self._link else ""
			self._query = self._query.replace("%26","&").replace("%3D","=")
			self._section = "#%s" % quote(self._link.split("#")[1]).replace("%",".") if "#" in self._link else ""
		else:
			self._query = "?%s" % self._params.split("?")[1] if "?" in self._params else ""
			self._section ="#%s" % quote(self._params.split("#")[1]).replace("%",".") if "#" in self._params else ""
			self._params = self._params.split("#")[0]
			self._params = self._params.split("?")[0]
		self._link = self._link.split("?")[0]
		self._link = self._link.split("#")[0]

	def fetch_site(self):
		links = self._pref if self._is_template else self._link
		if links.startswith(":"): links = links[1:]
		chunks = links.split(":")[:-1]
		lang=""; fam=""
		if links.startswith(":"): links = links[1:]
		chunks = links.split(":")[:-1]
		for chunk in chunks:
			if chunk.strip().lower() in languages:
				lang = chunk.strip().lower()
				links = links.replace("%s:" % chunk, "", 1)
			elif wm_divs.has_key(chunk.strip().lower()):
				lang = wm_divs[chunk.strip().lower()]
				fam = "wikimedia"
				links = links.replace("%s:" % chunk, "", 1)
			if families.has_key(chunk.strip().lower()):
				fam = families[chunk.strip().lower()]
				links = links.replace("%s:" % chunk, "", 1)
			elif projects.has_key(chunk.strip().lower()):
				fam = projects[chunk.strip().lower()]
				lang=""
				links = links.replace("%s:" % chunk, "", 1)

		if not lang and not fam:
			lang = self._lang if self._lang in languages else "meta"
			fam = families[self._fam] if families.has_key(self._fam) else projects["wm"]

		if lang in languages and not families.has_key(fam):
			fam = families[self._fam] if families.has_key(self._fam) else families["w"]
		elif not lang in languages and families.has_key(fam):
			lang = self._lang if self._lang in languages else "en"


		dom = ""
		if families.has_key(fam):
			dom = "org"
		elif fam == "wikimediafoundation":
			dom = "org"
		elif fam == "mediawiki":
			dom = "org"; lang= "www"
		elif fam == "translatewiki":
			dom="net"
		elif fam == "viquimedia":
			dom = "cat"; lang = "www"
		elif fam == "wikimedia":
			dom = "org"
			chapter = "%swikimedia" % links.split(":")[0]
			if _sitematrix.has_key(chapter):
				lang = links.split(":")[0]
				links = links.replace("%s:" % lang, "", 1)
				lang = lang.replace("-",".")
			if not lang: fam = "wikimediafoundation"
		elif fam == "wikiversity":
			wikiversity = "%swikiversity" % links.split(":")[0]
			if _sitematrix.has_key(chapter):
				lang = links.split(":")[0]
				links = links.replace("%s:" % lang, "", 1)
			if not lang: fam = "wikimediafoundation"
		if _sitematrix.has_key(lang) and isinstance(_sitematrix[lang], list):
			if fam not in _sitematrix[lang]:
				if len(_sitematrix[lang]) > 0:
					fam = _sitematrix[lang][0]
				else:
					if self._error == "skip": pass
					else: raise NoSuchSite("All projects of %s language had been closed." % (lang,))
				if self._error=="skip": pass
				else: raise NoSuchSite("%s language has not %s family" % (lang, fam))
		self._sitename = ".".join((lang, fam, dom)) if lang else "%s.%s" % (fam, dom)
		self._lang = lang
		self._fam = fam

		if self._sitename != self._site.sitename():
			self._site = Site(lang, fam)

		if not self._sitename in _sites:
			s = Site(lang, fam)
			try:
				self._site = s.load_siteinfo()
			except urllib2.URLError:
				raise NoSuchSite("%s does not exist or is not reachable." % s.sitename())
			_sites[self._sitename] = self._site

		if self._is_template: self._pref = links
		else: self._link = links

	def fetch_namespace(self):
		for chunk in self._link.split(":"):
			if capitalize(chunk.strip()) in _sites[self._sitename].namespaces():
				self._ns = capitalize(chunk.strip())
				self._link = self._link.replace("%s:" % chunk, "", 1)

		if self._is_template and not self._ns:
			self._ns = _sites[self._sitename].namespaces_by_number()[10][0]

		if self._is_user:
			self._ns = _sites[self._sitename].namespaces_by_number()[2][0]

		if self._ns: self._ns = _sites[self._sitename].namespaces_by_number()[_sites[self._sitename].namespaces()[self._ns]][0]
		if not self._link and not self._ns:
			self._link = "Main Page"

		if self._is_template and not self._pref.endswith("\\") :
			if self._link == "u":
				self._ns = _sites[self._sitename].namespaces_by_number()[2][0]
				self._title = self._params
			elif self._link == "ut":
				self._ns = _sites[self._sitename].namespaces_by_number()[3][0]
				self._title = self._params

	def append_query(self):
		if self._pref:
			if self._pref == "c" and _sites[self._sitename].namespaces()[self._ns] == 2: #_c_ontributions
				self._ns = ""
				self.normalize_title()
				self._title = u"Special:Contributions/%s" % self._title
			elif self._pref=="e": #_e_dit
				self._query= "?action=edit"
			elif self._pref=="d": #_d_iffonly
				self._query= "?diff=cur&diffonly=1"
			elif self._pref=="h":  #_h_istory
				self._query= "?action=history"
			elif self._pref == "j": #_j_avascript
				self._query= "?action=raw&ctype=text/javascript"
			elif self._pref.startswith("l:"): #use_l_ang
				self._query= "?uselang=%s" % self._pref.split(":")[1]
			elif self._pref=="L": #_l_astest diff
				self._query= "?diff=cur&oldid=prev"
			elif self._pref=="n": #_n_o redirect
				self._query= "?redirect=no"
			elif self._pref == "p": #_p_refixindex
				self.normalize_title()
				self._title = u"Special:Prefixindex/%s%s" % (self._ns and "%s:" % self._ns or "", self._title)
				self._ns = ""
			elif self._pref=="R": #_r_ender
				self._query= "?action=render"
			elif self._pref=="r": #r_e_nder and diffonly
				self._query= "?action=render&diff=cur&diffonly=1"

	def slice(self):
		self.clean_link()
		self.fetch_site()
		self.fetch_namespace()
		self._ns = self._ns and self._ns.strip() or ""
		if not self._title: self._title = self._link
		self._title = self._title.strip()
		self._pagename = "%s%s" % ((self._ns and "%s:" % self._ns) or "", self._title)
		self.normalize_title()
		self.append_query()
		self._url = "https://%s/%s/%s%s%s%s" % (
			self._sitename,
			"wiki" if not self._sitename.endswith(".cat") else "viqui",
			"%s:" % quote(self._ns) if self._ns else "",
			wikiquote(self._title),
			self._query or "",
			self._section or ""
		)

	def exists(self):
		return Page(self._site, self._pagename).exists()

	def info(self):
		t=time.time()
		paginfo = Page(self._site, self._pagename).info()
		print paginfo
		paginfo['delay']= "%0.3f" % (time.time()-t,)
		paginfo['size']= paginfo['length'] if not paginfo.has_key("missing") else 0
		return paginfo

class Site(_Surfer):
	def __init__(self, lang="meta", fam="wikimedia"):
		_Surfer.__init__(self)
		self._lang = lang
		self._fam = fam
		self._sitename = self.make_sitename(lang, fam)

	def load_namespaces(self):
		ns_tuple = self._get_namespaces()
		self._namespaces_by_number = ns_tuple[0]
		self._namespaces = ns_tuple[1]
		self._namespaces_by_local_name = ns_tuple[2]
		self._canonical_namespaces = ns_tuple[3]
		return self

	def load_users(self):
		self._users_tuple = self._get_users()
		return self

	def load_siteinfo(self):
		self._get_siteinfo()
		return self

	def make_sitename(self, lang="", fam=""	):
		if not lang: lang = self._lang
		if not fam: fam = self._fam
		if wm_divs.has_key(lang.lower()):
			lang = wm_divs[lang.lower()]
			fam = "wikimedia"

		if not lang and not fam:
			lang = self._lang if self._lang in languages else "meta"
			fam = families[self._fam] if families.has_key(self._fam) else projects["wm"]

		if families.has_key(fam.lower()):
			fam = families[fam.lower()]
		elif projects.has_key(fam.lower()):
			fam = projects[fam.lower()]
		if lang in languages and not families.has_key(fam):
			fam = families[self._fam] if families.has_key(self._fam) else families["w"]
		elif not lang in languages and families.has_key(fam):
			lang = self._lang if self._lang in languages  else "en"

		dom = ""
		if families.has_key(fam):
			dom = "org"
		elif fam == "wikimediafoundation":
			dom = "org"
		elif fam == "mediawiki":
			dom = "org"; lang= "www"
		elif fam == "translatewiki":
			dom="net"
		elif fam == "viquimedia":
			dom = "cat"; lang = "www"
		elif fam == "wikimedia":
			dom = "org"
			chapter = "%swikimedia" % lang
			if not lang: fam = "wikimediafoundation"
		if _sitematrix.has_key(lang) and isinstance(_sitematrix[lang], list):
			if fam not in _sitematrix[lang]:
				#raise NoSuchSite("%s language has not %s family" % (lang, fam))
				if len(_sitematrix[lang]) > 0:
					fam = _sitematrix[lang][0]
				else:
					raise NoSuchSite("All projects of %s language had beem closed." % (lang,))
		sitename = ".".join((lang, fam, dom)) if lang else "%s.%s" % (fam, dom)
		self._lang = lang
		self._fam = fam
		return sitename

	def abredged_sitename(self):
		fam = self._fam
		if families.has_key(fam) and \
		self._lang not in languages:
			self._lang = "en"
		if self._lang in languages and \
		fam not in families:
			fam = "wikipedia"
		if families.has_key(fam):
			fam = families[fam]
		self._fam = fam
		if fam =="wikipedia": fam="wiki"
		elif fam=="wiktionary": fam="wikt"
		elif fam in ("wikibooks","wikiquote","wikinews","wikisource"):
			fam = fam[4:]
		return self._lang+fam

	def reduced_family(self):
		fam = self.fam
		if fam == "wikipedia":
			fam = "w"
		elif fam == "wiktionary":
			fam = "wikt"
		elif fam in ("wikibooks","wikiquote","wikinews","wikisource","wikiversity"):
			fam = fam[5]
		return fam

	def sitename(self):
		return self._sitename

	def language(self):
		return self._lang

	def family(self):
		return self._fam

	def sysops(self):
		return self._users_tuple[0]

	def bots(self):
		return self._users_tuple[1]

	def halfbots(self):
		return self._users_tuple[2]

	def namespaces(self):
		return self._namespaces

	def namespaces_by_local_name(self):
		return self._namespaces_by_local_name

	def namespaces_by_number(self):
		return self._namespaces_by_number

	def canonical_namespaces(self):
		return self._canonical_namespaces

	def get_api(self, params):
		if self._sitename != "www.viquimedia.cat": self._path = "/w/api.php"
		else:
			self._path = "/v/api.php"
			self._headers["Accept-Encoding"]="identity"
		self.make_url()
		if not params.has_key("action"):
			params['action'] = "query"
		params["format"] = "json"
		if self._cookies and not self._headers.has_key("Cookie"):
			self._headers.update(Cookie=self._cookies.encode("utf-8"))
		response, data = self.connection(params)
		return json.loads(data)

	def get_page(self, title):
		"""Get a page from a Wikimedia project"""
		params = {
			"titles": title,
			"prop": "revisions",
			"rvprop": "content",
			"indexpageids": ""
		}
		data = self.get_api(params)['query']
		if data['pageids'][0] == "-1":
			return ""
		return data['pages'][data['pageids'][0]]['revisions'][0]['*']

	def _get_sitematrix(self):
		params = {
			"action": "sitematrix",
		}
		data = self.get_api(params)['sitematrix']
		for site in data:
			if site.isdigit():
				_sitematrix[data[site]['code']] = [x['code'] if x['code'] != "wiki" else "wikipedia" for x in data[site]['site'] if not x.has_key("closed")]
			if site == "specials":
				for special in data[site]:
					_sitematrix[special['code']] = "_sp_" \
					if not special.has_key("private") and not special.has_key("closed") \
					else "_sp_priv" if special.has_key("private")  else "_sp_closed"
		#print json.dumps(_sitematrix, indent=2)
		return _sitematrix

	def _get_namespaces(self):
		ns09={}; nsAZ={}; nsLAZ={}
		params = {
			"meta": "siteinfo",
			"siprop": ("namespaces", "namespacealiases")
		}
		q = self.get_api(params)
		namespaces = q['query']['namespaces']
		namespacealiases = q['query']['namespacealiases']
		for ns in namespaces:
			if namespaces[ns].has_key('canonical'):
				ns09[namespaces[ns]['id']] = [namespaces[ns]['*'], namespaces[ns]['canonical']]
				nsAZ.update({namespaces[ns]['*']: namespaces[ns]['id'], namespaces[ns]['canonical']: namespaces[ns]['id']})
				nsLAZ.update({namespaces[ns]['*']: namespaces[ns]['id']})
		canonical = dict([(k, v[1]) for k, v in ns09.iteritems()])
		for nsa in namespacealiases:
			ns09[nsa['id']].append(nsa['*'])
			nsAZ.update({nsa['*']: nsa['id']})
			nsLAZ.update({nsa['*']: nsa['id']})
		return ns09, nsAZ, nsLAZ, canonical

	def _get_users_by_group(self, group):
		"""
		bot, sysop, bureaucrat, checkuser, steward, accountcreator, import,
		transwiki, ipblock-exempt, oversight, autopatrolled, rollbacker, confirmed
		"""
		users=[]; start = True
		params = {
			"list": "allusers",
			"aulimit": "max",
			"augroup": group,
			"auprop": "groups"
		}
		while start:
			if isinstance(start, basestring):
				params['aufrom'] = start
			q = self.get_api(params)
			data = q['query']['allusers']
			for user in data:
				users.append(user['name'])
			start = q['query-continue']['allusers']['aufrom'] if q.has_key("query-continue") else None
		return users

	def _get_users(self):
		sysops = self._get_users_by_group("sysop")
		bots = self._get_users_by_group("bot")
		halfbots = ["MediaWiki default", "Commons Delinker"]
		if self._lang == "ca":
			halfbots += ["BotPatrulla", "BotReversor"]
		return sysops, bots, halfbots

	def _get_users_by_groups(self, groups):
		"""
		bot, sysop, bureaucrat, checkuser, steward, accountcreator, import,
		transwiki, ipblock-exempt, oversight, autopatrolled, rollbacker, confirmed
		"""
		users=dict(zip(groups,[[]]*len(groups))); start = True
		params = {
			"list": "allusers",
			"aulimit": "max",
			"augroup": groups,
			"auprop": "groups"
		}
		while start:
			if isinstance(start, basestring):
				params['aufrom'] = start
			q = self.get_api(params)
			data = q['query']['allusers']
			for user in data:
				for group in user['groups']:
					if group in groups:
						users[group].append(user['name'])
			start = q['query-continue']['allusers']['aufrom'] if q.has_key("query-continue") else None
		return users

	def _get_bots_and_sysops(self):
		users = self._get_users_by_groups(("sysop","bot"))
		halfbots = ["MediaWiki default", "Commons Delinker"]
		if self._lang == "ca":
			halfbots += ["BotPatrulla", "BotReversor"]
		return users['sysop'], users['bot'], halfbots

	def _get_siteinfo(self):
		"""
		return bot & sysop user groups and namespace names
		"""
		if self._sitename.endswith(".cat"):
			self.load_users()
			self.load_namespaces()
			return  self
		groups=("sysop","bot")
		users={"sysop": [], "bot": []}; start = True
		params = {
			"list": "allusers",
			"aulimit": "max",
			"augroup": groups,
			"auprop": "groups",
		}
		namespaces=None
		ns09={}; nsAZ={}; nsLAZ={}

		while start:
			if not namespaces:
				params["meta"]= "siteinfo"
				params["siprop"]=("namespaces", "namespacealiases")

			if isinstance(start, basestring):
				params['aufrom'] = start
			q = self.get_api(params)
			data = q['query']['allusers']
			for user in data:
				group = [x for x in user['groups'] if x in groups][0]
				users[group].append(user['name'])
			if not namespaces:
				namespaces = q['query']['namespaces']
				namespacealiases = q['query']['namespacealiases']
				for ns in namespaces:
					if namespaces[ns].has_key('canonical'):
						ns09[namespaces[ns]['id']] = [namespaces[ns]['*'], namespaces[ns]['canonical']]
						nsAZ.update({namespaces[ns]['*']: namespaces[ns]['id'], namespaces[ns]['canonical']: namespaces[ns]['id']})
						nsLAZ.update({namespaces[ns]['*']: namespaces[ns]['id']})
				canonical = dict([(k, v[1]) for k, v in ns09.iteritems()])
				for nsa in namespacealiases:
					if ns09.has_key(nsa['id']):
						ns09[nsa['id']].append(nsa['*'])
					else: #I think is a bug
						ns09.update({nsa['id']:[nsa['*']]})
					nsAZ.update({nsa['*']: nsa['id']})
					nsLAZ.update({nsa['*']: nsa['id']})
			start = q['query-continue']['allusers']['aufrom'] if q.has_key("query-continue") else None
		self._namespaces_by_number = ns09
		self._namespaces = nsAZ
		self._namespaces_by_local_name = nsLAZ
		self._canonical_namespaces = canonical
		halfbots = ["MediaWiki default", "Commons Delinker"]
		if self._lang == "ca":
			halfbots += ["BotPatrulla", "BotReversor"]
		self._users_tuple = users['sysop'], users['bot'], halfbots
		return self

	def recent_changes(self):
		params = {
			"list": "recentchanges",
			"rcprop": ("user", "title", "sizes", "redirect", "comment", "timestamp"),
			"rcshow": "anon",
			"rclimit": "max"
		}
 		while start:
			if isinstance(start, basestring):
				params['rcstart'] = start
			q = self.get_api(params)
			start = q['query-continue']['recentchanges']['rcstart'] if q.has_key('query-continue') else None
			for event in query['recentchanges']:
				nl = event['newlen']
				ol = event['oldlen']
				page = event['title']
				user = event['user']
				comment = event['comment']
				timestamp = event['timestamp']

	def statistics(self, item=None):
		params = {
			'action': 'query',
			'meta': 'siteinfo',
			'siprop': 'statistics',
		}
		data = self.get_api(params)
		items=data['query']['statistics']
		if items.has_key(item): return items[item]
		return items

	def get_articles(self):
		params = {
			'action': 'query',
			'meta': 'siteinfo',
			'siprop': 'statistics',
		}
		data = self.get_api(params)
		arts=data['query']['statistics']['articles']
		return arts

	def get_cookies(self):
		home = os.getcwd()
		logindata= os.path.join(home, "login-data")
		if not os.path.exists(logindata):
			os.makedirs(logindata)

		prefix = self.abredged_sitename()
		file = "%s_%s.data" % (prefix, self._user)
		file = os.path.join(logindata,file)

		if os.path.exists(file):
			jsontext = open(file,"r").read()
			cookies = json.loads(jsontext)
			self._cookies = self.set_cookies(cookies)
			return
		else:
			print "no cookies for %s" % self._sitename
			return ""

	def set_cookies(self, data):
		prf=data["cookieprefix"]
		userName = "%sUserName" % prf
		userID = "%sUserID" % prf
		token = "%sToken" % prf
		sessionID = "%s_session"

		c={}
		c[userName] = data["lgusername"]
		c[userID] = data["lguserid"]
		c[token] = data["lgtoken"]
		c[sessionID] = data['sessionid']

		cookies = ["%s=%s" % (k, str(c[k])) for k in c]
		cookies = "; ".join(cookies)
		return cookies

	def save_cookies(self, data):
		home = os.getcwd()
		logindata = os.path.join(home, "login-data")
		if not os.path.exists(logindata):
			os.makedirs(logindata)

		file = "%s_%s.data" % (data['cookieprefix'], data['lgusername'])
		file = os.path.join(logindata,file)

		if not os.path.exists(file):
			f = open(file,"w")
			json.dump(data, f, indent = 4)
			f.close()

	def login(self):
		"""Design logging in Wikimedia projects"""
		user, passw  = self.get_ident()
		params = {"action": "login", "lgname": user, "lgpassword": passw}
		data = self.get_api(params)
		data = data['login']
		if data['result'] == "Success":
			self._user = data['lgusername']
			self.save_cookies(data)
			self._cookies = self.set_cookies(data)
		elif data['result'] == "NeedToken":
			params['lgtoken']=data['token']
			self._headers['Cookie'] = "%s_session=%s; " % (
				data['cookieprefix'],
				data['sessionid']
			)

			data = self.get_api(params)
			data = data['login']
			if data['result'] == "Success":
				self._user = data['lgusername']
				self.save_cookies(data)
				self._cookies = self.set_cookies(data)
			else:
				print data
		else:
			print data

class User:
	"""A user (registered account or anonymous IP) on a given wiki site."""

	def __init__(self, site, account):
		"""
		site is a Site object
		account is the account of the user in the site
		"""
		self._site = site
		self._account = account

	def is_anon(self):
		"""Return True when the account is an IP address (anonymous user)."""
		return is_ip(self._account)

	def info(self, key=None):
		"""Return {'editcount': int, 'gender': str}, or only the value for
		``key`` when given.

		Registered users are resolved with one list=users request; IPs have
		no user entry, so their edits are counted by paging through
		list=usercontribs (one API request per page of results).
		"""
		if not is_ip(self._account):
			params = {
				"list": "users",
				"ususers": self._account,
				"usprop": ("editcount","gender")
			}
			data = self._site.get_api(params)['query']['users'][0]
			# Missing/invalid accounts carry no 'editcount' key.
			if data.has_key('editcount'):
				rtn = {'editcount': data['editcount'], 'gender': data['gender']}
			else:
				rtn = {'editcount': 0, 'gender': 'unknown'}
		else:
			ec = 0; ucstart = True
			params = {
				"list": "usercontribs",
				"ucuser": self._account,
				"ucprop": "title",
				"uclimit": "max"
			}
			# ucstart: True on the first pass, then the continuation
			# timestamp from query-continue, then None to stop.
			while ucstart:
				if isinstance(ucstart, basestring):
					params['ucstart'] = ucstart
				q = self._site.get_api(params)
				data = q['query']['usercontribs']
				if data: ec += len(data)
				ucstart = q['query-continue']['usercontribs']['ucstart'] if q.has_key('query-continue') else None
			rtn = {'editcount': ec, 'gender': 'unknown'}
		if not key: return rtn
		else: return rtn[key]

	def registration(self):
		"""Return the account-creation Time from the newusers log, or None
		when not found (always None for IPs)."""
		registration = None
		if not is_ip(self._account):
			params = {
				"list": "logevents",
				"leuser": self._account,
				"letype": "newusers",
				"lelimit": "max"
			}
			data = self._site.get_api(params)
			if data.has_key("query"):
				data = data['query']
				for log in data['logevents']:
					if log.has_key("action") and log['action'] == "newusers":
						registration = Time(log['timestamp'])
						break
			elif data.has_key("error"):
				print "unknown user. API error: %s" % data['error']['info']
		return registration

	def about(self, key=None, force_reg=False):
		"""Caution! This action requires at least 2 requests to API.
		Key must be one of these values: 'editcount', 'gender', 'groups', 'registration',
		'last edit', 'last timestamp', 'first edit', 'first timestamp', 'extra'. By default it returns all keys in a dict.
		force_reg must try to retrieve the registration timestamp by another request.
		"""
		if not is_ip(self._account):
			# One combined request: user info plus the most recent contribution.
			params = {
				"list": ("users", "usercontribs"),
				"ususers": self._account,
				"usprop": ("editcount", "gender", "groups", "registration"),
				"ucuser": self._account,
				"ucprop": ("title", "timestamp"),
				"uclimit": 1,
			}
			data = self._site.get_api(params)['query']
			ui = data['users'][0]
			rtn = {}
			if not ui.has_key("missing"):
				rtn.setdefault("editcount", ui['editcount'])
				rtn.setdefault("gender", ui['gender'])
				groups = ui['groups']
				# NOTE(review): assumes every user carries the implicit "*"
				# group; remove() raises ValueError otherwise — confirm.
				groups.remove("*")
				rtn.setdefault("groups", ", ".join(groups))
				rtn.setdefault("registration", Time(ui['registration']) if ui['registration'] else None)
				if force_reg:
					# Overrides the API value with a log-based lookup (extra request).
					rtn.update({"registration": self.registration()})
				if ui['editcount']>1:
					uc = data['usercontribs'][0]
					rtn.setdefault("last edit", uc['title'])
					rtn.setdefault("last timestamp", Time(uc['timestamp']))
					# Second request, oldest-first, to fetch the first edit.
					params = {
						"list": ("usercontribs"),
						"ucuser": self._account,
						"ucprop": ("title", "timestamp"),
						"uclimit": 1,
						"ucdir": "newer"
					}
					uc = self._site.get_api(params)['query']['usercontribs'][0]
					rtn.setdefault("first edit", uc['title'])
					rtn.setdefault("first timestamp", Time(uc['timestamp']))
					rtn.setdefault("extra", "OK")
				elif ui['editcount']==1:
					# Single edit: it is both first and last; only "first" is filled.
					uc = data['usercontribs'][0]
					rtn.setdefault("first edit", uc['title'])
					rtn.setdefault("first timestamp", Time(uc['timestamp']))
					rtn.setdefault("last edit", '')
					rtn.setdefault("last timestamp", "")
					rtn.setdefault("extra", "1contribs")
				elif ui['editcount']==0:
					rtn.setdefault("first edit", "")
					rtn.setdefault("first timestamp", None)
					rtn.setdefault("last edit", '')
					rtn.setdefault("last timestamp", None)
					rtn.setdefault("extra", "0contribs")
			else:
				# NOTE(review): this branch only runs when "missing" is set,
				# so the trailing "error" alternative below is unreachable.
				rtn.setdefault("last edit", '')
				rtn.setdefault("last timestamp", None)
				rtn.setdefault("first edit", '')
				rtn.setdefault("first timestamp", None)
				rtn.setdefault("extra", ui.has_key("missing") and "missing" or "error")

		else:
			# Anonymous user: page through all contributions; `last` is taken
			# from the first result of the first page (newest edit), `first`
			# from the last result of the last page (oldest edit).
			ec = 0; ucstart = True; first = None; last = None
			params = {
				"list": "usercontribs",
				"ucuser": self._account,
				"ucprop": ("title", "timestamp"),
				"uclimit": "max"
			}
			while ucstart:
				if isinstance(ucstart, basestring):
					params['ucstart'] = ucstart
				q = self._site.get_api(params)
				data = q['query']['usercontribs']
				if not last and data: last = data[0]['title'], Time(data[0]['timestamp'])
				if data: ec += len(data)
				ucstart = q['query-continue']['usercontribs']['ucstart'] if q.has_key('query-continue') else None
			if ec>1:
				first = data[-1]['title'], Time(data[-1]['timestamp'])
				rtn = {
					'editcount': ec,
					'gender': 'unknown',
					"groups": "anonymous",
					"registration": None,
					"last edit": last[0],
					"last timestamp": last[1],
					"first edit": first[0],
					"first timestamp": first[1],
					"extra": "OK"
				}
			elif ec==1:
				rtn = {
					'editcount': ec,
					'gender': 'unknown',
					"groups": "anonymous",
					"registration": None,
					"last edit": '',
					"last timestamp": None,
					"first edit": last[0],
					"first timestamp": last[1],
					"extra": "1contrib"
				}
			else:
				rtn = {
					'editcount': 0,
					'gender': 'unknown',
					"groups": "anonymous",
					"registration": None,
					"last edit": '',
					"last timestamp": None,
					"first edit": '',
					"first timestamp": None,
					"extra": "missing"
				}

		if not key:	return rtn
		else: return rtn [key]

	def nick(self):
		"""Return the raw account name (user name or IP)."""
		return self._account

class Page:
	def __init__(self, site, title):
		"""
		site is a Site object.
		title is a unicode string.
		"""
		self._site = site
		self._title = title

	def to_url(self):
		return WikiLink(self._title, self._site._lang, self._site._fam).url()

	def strip_ns(self):
		"""Returns title without namespace."""
		chunks = self._title.split(":", 1)
		if chunks[0] in self._site.namespaces():
			return chunks[1]
		return self._title

	def base_title(self):
		"""Returns base title of the page. Base title is the title of the
		page, without namespace name and without subtitle, if any."""
		bt = self._title
		chunks = bt.split(":", 1)
		if chunks[0] in self._site.namespaces():
			bt = chunks[1]
		bt = bt.split("/")[0]
		return bt

	def namespace(self):
		ns_id = 0
		for ns in self._site.namespaces():
			if self._title.startswith(u"%s:" % ns):
				ns_id = self._site.namespaces()[ns]
				break
		return ns_id

	def canonical_ns(self):
		id = self.namespace()
		if id == 0: return ""
		cns = self._site.canonical_namespaces()
		return cns[id].lower()

	def exists(self):
		"""Returns True if page exists."""
		return int(self.info()['pageid'])>0

	def content(self):
		"""Returns string if page exists, or empty string if doesn't."""
		return self._site.get_page(self._title)

	def info(self):
		"""returns info about the page"""
		params = {
			"indexpageids": "",
			"prop": "info",
			"titles": self._title
		}
		query = self._site.get_api(params)['query']
		pageid = query['pageids'][0]
		return query['pages'][pageid]

	def revisions(self, oldid, diff=None):
		"""Get two revisions, if they are from the same page."""
		params = {
			"indexpageids": "",
			"revids": [diff, oldid] if diff else oldid,
			"prop": "revisions",
			"rvprop": ("content", "user", "timestamp")
		}
		query = self._site.get_api(params)['query']
		if len(query['pageids']) > 1:
			raise NotSamePage
		print "%s %s oldid: %s diff %s" %(__name__, "revisions", oldid, diff)
		if len(query['pageids']) == 0:
			print "%s %i %s %s %s" % (self._site.sitename(), len(query['pageids']), oldid, diff)
			raise NoSuchPage
		pageid = query['pageids'][0]
		if diff:
			newv = query['pages'][pageid]['revisions'][0]['*']
			oldv = query['pages'][pageid]['revisions'][1]['*']
		else:
			newv = ""
			oldv = query['pages'][pageid]['revisions'][0]['*']
		return newv, oldv

	def reverts(self, endid):
		"""Get all revisions since specified endid, then get all contributors,
		keep unique ones but not first user neither last one, who are the revertor
		and the last good contributor."""
		start = True; editors = []
		params = {
			"prop": "revisions",
			"titles": self._title,
			"rvprop": ("user", "id"),
			"rvendid": endid,
			"rvlimit": "max",
			"indexpageids": "",
		}
		while start:
			if isinstance(start, basestring):
				params['rvstartid'] = start
			q = self._site.get_api(params)
			pageid = q['query']['pageids'][0]
			data = q['query']['pages'][pageid]
			start = q['query-continue']['revisions']['rvstartid'] if q.has_key("query-continue") else None
			for rev in data['revisions'][1:-1]:
				editors.append(rev['user'])
		editors = set(editors)
		return editors

	def editors(self):
		"""Get all revisions since specified endid, then get all contributors."""
		if not hasattr(self._site, "_users_tuple"): self._site.load_users()
		start = True; editors = []; n = 0
		params = {
			"prop": "revisions",
			"titles": self._title,
			"rvprop": "user",
			"rvlimit": "max",
			"indexpageids": "",
		}
		while start:
			if isinstance(start, basestring):
				params['rvstartid'] = start
			q = self._site.get_api(params)
			pageid = q['query']['pageids'][0]
			data = q['query']['pages'][pageid]
			start = q['query-continue']['revisions']['rvstartid'] if q.has_key("query-continue") else None
			for rev in data['revisions']:
				if rev['user'] not in self._site.bots() + self._site.halfbots():
					editors.append(rev['user'])
			n+=len(data['revisions'])
		creator = editors[-1]
		eds = set(editors)
		d={}
		for e in eds:
			d.update({e: editors.count(e)})
		return eds, creator, d, n

	def title(self):
		return self._title

# Module-level caches shared by the whole library.
# NOTE(review): presumably keyed by site identifier and filled lazily by
# Site — confirm against the Site class (defined earlier in this file).
_sites = {}
_sitematrix = {}
# Import-time side effect: populate the sitematrix cache via the API as
# soon as this module is loaded.
Site()._get_sitematrix()
if __name__ == '__main__':
	# Manual smoke test: resolve a couple of interwiki links when the
	# module is run as a script.
	WL = WikiLink
	wl = WL("[[fr:q:Ot]]")
	print WL(u"t[[ca:Peu]]").info()
	print WL(u"t[[ca:París]]").info()
# Bare string literal: a sample API request kept as a reference note; it
# has no runtime effect.
"http://www.viquimedia.cat/v/api.php?format=jsonfm&action=query&augroup=sysop|bot&list=allusers&meta=siteinfo&aulimit=max&siprop=namespaces|namespacealiases&auprop=groups"
