#!/usr/bin/python
# coding: utf-8
from wikiqueries import *
import lexer
#import iwikidict
import codecs
import translatesegment


def link_translate(text):
	"""Best-effort translation of a single link target.

	Returns the translated string when translatesegment produces a
	non-empty result; otherwise returns *text* unchanged, so callers
	never have to handle translation failures themselves.
	"""
	try:
		#res=translation_by_iwiki(text,'en','uk')
		res = translatesegment.translate(text)
	except Exception:
		# Narrowed from a bare ``except:`` so KeyboardInterrupt and
		# SystemExit still propagate; any translation error still
		# falls back to the untranslated text (deliberate best-effort).
		return text
	# An empty/None result also falls back to the original text.
	if res:
		return res
	else:
		return text

def translate(text,page_name,page_name_translation=""):
	text = toU(text)
	tokens,rest=lexer.parser(text)
	if len(rest)>0:
		print "Обережно: Недопарсено"
		print tokens[-3:]
		print rest[:100]
		exit()
	res = []
	all_links = 0
	translated_links = 0
	for i in tokens:
		if i[0]==lexer.LINK:
			all_links+=1
			tran = toU(link_translate(i[1]))
			if i[1][:len("Category:")]=="Category:":
				newlink = (i[0],tran,tran)
			else:
				newlink = (i[0],tran,i[2])
			res.append(lexer.token_text(newlink))
		elif i[0]==lexer.HEADER:
			#tr = dict_en_uk.select_translation(i[1],True)
			tr = i[1]
			res.append(lexer.token_text((i[0],tr,i[2])) )
		else:
			res.append(lexer.token_text(i))

	res.append(u"\n[[en:"+toU(page_name)+u"]]")
	print "Translated: %d / %d" % (translated_links, all_links)
	return res


if __name__=="__main__":
	from highlighter import print_highlighted as printh
	pn=raw_input("Input page name:")
	if pn=="":
		pn="Easy A"
	text = page_source("http://en.wikipedia.org",pn)
	res= translate(text,pn)
	lex,rest=lexer.parser("".join(res))
	printh(lex)
	f=codecs.open("transbuf.txt","a","utf-8")
	f.write("".join(res))
	f.close();
