#!/usr/bin/python
# coding:utf-8

""" 
Get a link to wiki page (as parameter, or interactively, 
and gives a markup optimized for wordpress:
1. Replaced wiki latex generated images to native wordpress $latex $ tags.
2. Removes [.edit] links.
3. Points links to right domain (from which was get page).
4. Removes red links.
5. Replaces geshi highlighting to [sourcecode lang=""] tags.

Depends on BeautifulSoup unit.
"""
__author__ = "Bunyk T."

import re
import sys
import urllib2
import BeautifulSoup

# Pretend to be an ordinary browser (some wikis block unknown agents) and
# forbid any cached response so we always fetch the current page revision.
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent , "Cache-Control": "no-store, no-cache, must-revalidate"}

def get(url):
	""" returns text on given url. If error returns None """
	req=urllib2.Request(url,None,headers)
	print "Will get",url
	try:
		res=urllib2.urlopen(req)
	except urllib2.URLError, e:
		print tcols.red+"Error in getting %s :" % url ,tcols.none
		return None
	return unicode(res.read(),"utf-8")

def get_source(domain, name):
	""" Fetch the raw wiki markup of page `name` on `domain` and return a
	list of (language, code) tuples, one per <source lang="..."> block.

	Returns an empty list when the page could not be downloaded.
	"""
	# `domain` already ends with '/' (see how it is extracted from the url),
	# so don't insert another one -- the original produced ".../w" as "//w".
	text = get(domain + 'w/index.php?title=' + name + '&action=raw')
	if text is None:
		# BUG FIX: get() returns None on network errors; re.findall(None)
		# used to raise TypeError.  Treat it as "no source blocks".
		return []
	return re.findall('<source lang="(.*?)">(.*?)</source>', text, re.DOTALL)

def wiki_filter(ln, domain):
	""" Convert one line of rendered wiki HTML to wordpress-friendly markup.

	Latex-formula images become $latex ...$ tags, [edit] section links and
	HTML comments are stripped, and relative /wiki/ links are rewritten to
	point at `domain`.
	"""
	rules = [
		# wiki latex image -> native wordpress latex tag
		(r'<img class="tex" alt="(.*?)" src=".*?" />', r"$latex \1$"),
		# drop [edit] section links
		(r'<span class="editsection">.*?</span>', r""),
		# drop HTML comments
		(r'<!--.*?-->', r""),
		# absolutize wiki links against the source domain
		(r'<a href="/wiki/(.*?)"', r'<a href="' + domain + r'wiki/\1"'),
	]
	for pattern, replacement in rules:
		ln = re.sub(pattern, replacement, ln)
	return ln

START_MARK="<!-- bodytext -->"
FINISH_MARK="<!-- /bodytext -->"
GESHI_MARK="GESHI-HIGHLIGHT"

if len(sys.argv)<2:
	url= raw_input("Page url: ")
else:
	url = sys.argv[1]

# Extract the wiki domain (with trailing '/') and the page name from the url.
# Two forms are supported, e.g.:
#   http://uk.wikibooks.org/wiki/Ruby
#   http://uk.wikibooks.org/w/index.php?title=Ruby&oldid=14260
try:
	# pretty url: http://domain/wiki/Page_name
	domain = re.findall("(http://.*?/)wiki/",url)[0]
	page_name = re.findall("http://.*?/wiki/(.*)$",url)[0]
except IndexError:
	# index.php url: http://domain/w/index.php?title=Page_name[&...]
	domain = re.findall("(http://.*?/)w/index.php",url)[0]
	# BUG FIX: the original pattern 'title=(.*?)&.*' required a '&' after
	# the title, so urls without further query parameters raised an
	# uncaught IndexError.  '[^&]+' handles both cases.
	page_name = re.findall(r"http://.*?/w/index\.php.*?title=([^&]+)",url)[0]

# Download the page and parse it with BeautifulSoup.
soup = BeautifulSoup.BeautifulSoup(get(url))

# Swap every geshi-highlighted <div> for a plain text marker; the actual
# source code is later re-fetched from the raw wiki markup, where it is
# not mangled by the highlighter.
geshi_divs = soup.findAll("div", {"class": "mw-geshi"})
for div in geshi_divs:
	div.replaceWith(GESHI_MARK)
has_source = len(geshi_divs) > 0

# Flatten red (nonexistent-page) links down to their inner text.
for link in soup.findAll("a", {"class": "new"}):
	link.replaceWith(link.renderContents())

if has_source:
	sources_list = get_source(domain,page_name) # sources used on page
current_source = 0

doc = str(soup).splitlines()


# Walk the rendered page line by line and print only the part between
# START_MARK and FINISH_MARK, filtered for wordpress.  Each GESHI_MARK
# placeholder is replaced with the matching raw source block, in order.
inbody = False
for line in doc:
	if re.search(FINISH_MARK,line):
		break  # end of the page body -- stop printing entirely
	if re.search(GESHI_MARK,line):
		# A geshi block stood here: emit the corresponding raw source
		# wrapped in wordpress [sourcecode] tags.  NOTE(review): assumes
		# get_source() returned one entry per marker -- a mismatch would
		# raise IndexError here.
		print "[sourcecode language='"+sources_list[current_source][0]+"']",
		print sources_list[current_source][1],
		print "[/sourcecode]"
		current_source+=1
		continue
	if inbody:
		print wiki_filter(line,domain)
		continue
	if re.search(START_MARK,line):
		inbody = True  # body content starts on the NEXT line
