#!/usr/bin/python
# -*- coding: utf-8 -*-

#$ -l h_rt=0:30:00
#$ -l virtual_free=100M
#$ -l arch=sol
#$ -o $HOME/refreshfeeds.out
#$ -e $HOME/refreshfeeds.err
#$ -N refreshfeeds

import sys
#sys.path.append('/home/artem/pywikipedia')
#sys.path.append('/home/artem/wikitools')
sys.path.append('/home/artem/Dropbox/wiki/bot/pywikipedia')

import wikipedia
import catlib
import query

import datetime
import iso8601

import urllib
import lxml.etree as etree

def getDate(n):
	"""Return the calendar date *n* days before today."""
	offset = datetime.timedelta(days=n)
	return datetime.date.today() - offset

MonthName = [u'января', u'февраля', u'марта', u'апреля', u'мая', u'июня', u'июля', u'августа', u'сентября', u'октября', u'ноября', u'декабря']

def getPageId(title):
	"""Return the page id (as a string) of the page named *title*."""
	response = query.GetData({'action': 'query', 'titles': title}, site)
	# A single-title query yields one entry in 'pages', keyed by the id.
	return response['query']['pages'].keys()[0]
	
def getMainCategory(pageid):
	"""Return the first main-section category of the page, or None.

	Walks the page's category list (following API continuation) until one
	of the six top-level news sections is found.
	"""
	sections = (u'Политика', u'Экономика', u'Общество', u'Культура', u'Спорт', u'Наука и технологии')
	params = {'action': 'query', 'prop': 'categories', 'pageids': pageid}
	
	while True:
		response = query.GetData(params, site)
		for cat in response['query']['pages'][pageid]['categories']:
			name = cat['title'].replace(u'Категория:', '')
			if name in sections:
				return name
		
		cont = response.get('query-continue')
		if cont is None:
			return None
		params['clcontinue'] = cont['categories']['clcontinue']

def getPlainContent(pageid, revid):
	"""Return the plain-text intro extract of revision *revid* of page *pageid*."""
	request = {
		'action'     : 'query',
		'revids'     : revid,
		'prop'       : 'extracts',
		'exintro'    : '',
		'explaintext': ''
		}
	data = query.GetData(request, site)
	return data['query']['pages'][pageid]['extract']

def getContent(pageid, revid):
	"""Return the HTML intro extract of revision *revid* of page *pageid*."""
	request = {
		'action' : 'query',
		'revids' : revid,
		'prop'   : 'extracts',
		'exintro': ''
		}
	data = query.GetData(request, site)
	return data['query']['pages'][pageid]['extract']

def getThumbURLandMIME(image):
	"""Return a (url, mime) pair for *image*, preferring a 600px-wide thumbnail.

	Originals no wider than 600px are returned at their full-size URL.
	"""
	params = {'action': 'query', 'titles': image.title(), 'prop': 'imageinfo', 'iiprop': 'mime|url|size', 'iiurlwidth': '600'}
	response = query.GetData(params, site)
	
	# NOTE(review): the result is read from page key '-1' — presumably
	# because the files live on a remote repo (Commons); confirm.
	info = response['query']['pages']['-1']['imageinfo'][0]
	if info['width'] > 600:
		return info['thumburl'], info['mime']
	return info['url'], info['mime']

def published(pageid):
	"""Return True if the page is in 'Категория:Опубликовано', False otherwise.

	(The previous docstring named 'Категория:Публиковать', which did not
	match the category the query actually filters on.)
	"""
	params = {
		'action'      : 'query',
		'prop'        : 'categories',
		'pageids'     : pageid,
		'clcategories': u'Категория:Опубликовано'
		}
	response = query.GetData(params, site)
	# With clcategories set, the API includes a 'categories' key only when
	# the page is in that category — so key presence is the answer.
	return 'categories' in response['query']['pages'][pageid]

def blacklisted(pageid):
	"""Return True if page is in either 'Категория:Не публиковать' or 'Категория:Редактируется'. Return False otherwise"""
	request = {
		'action'      : 'query',
		'prop'        : 'categories',
		'pageids'     : pageid,
		'clcategories': u'Категория:Не публиковать|Категория:Редактируется'
		}
	response = query.GetData(request, site)
	# 'categories' appears in the page entry only when a filter matched.
	return 'categories' in response['query']['pages'][pageid]

def getLatestNews(num_days):
	"""Return dictionary {'pageid': {'page: page}} for all published and not blacklisted news placed in categories for last 'num_days' days"""
	pages_dict = {}
	for offset in range(num_days):
		day = getDate(offset)
		cat_title = u'Категория:' + str(day.day) + ' ' + MonthName[day.month-1] + ' ' + str(day.year)
		for page in catlib.Category(site, cat_title).articles():
			pageid = getPageId(page.title())
			if published(pageid) and not blacklisted(pageid):
				pages_dict[pageid] = {'page': page}
	
	return pages_dict

def getData(pages_dict):
	"""Add to provided dictionary the following keys: 'title', 'author', 'category', 'images', 'pubdate', 'content', 'content plain'"""
	print 'Getting data...'
	getTitles(pages_dict)
	getAuthors(pages_dict)
	getCategories(pages_dict)
	getImages(pages_dict)
	getPubDates(pages_dict)
	getContents(pages_dict)

def getTitles(pages_dict):
	print 'Getting titles...'
	params = {
		'action' : 'query',
		'pageids': '|'.join(pages_dict.iterkeys())
		}
	response = query.GetData(params, site)
	
	for pageid, page_data in pages_dict.iteritems():
		page_data['title'] = response['query']['pages'][pageid]['title']

def getAuthors(pages_dict):
	print 'Getting authors...'
	for pageid, page_data in pages_dict.iteritems():
		page_data['author'] = page_data['page'].getCreator()[0]

def getCategories(pages_dict):
	print 'Getting categories...'
	for pageid, page_data in pages_dict.iteritems():
		page_data['category'] = getMainCategory(pageid)

def getImages(pages_dict):
	print 'Getting images...'
	for pageid, page_data in pages_dict.iteritems():
		images = []
		for image in page_data['page'].imagelinks():
			try:
				images.append(getThumbURLandMIME(image))
			except:
				continue
		page_data['images'] = images

def getPubDates(pages_dict):
	print 'Getting publication dates...'
	params = {
		'action'      : 'query',
		'prop'        : 'categories',
		'pageids'     : '|'.join(pages_dict.iterkeys()),
		'clcategories':u'Категория:Опубликовано',
		'clprop'      : 'timestamp'
		}
	
	while True:
		response = query.GetData(params, site)
		for pageid, sub in response['query']['pages'].iteritems():
			if 'categories' in sub:
				pages_dict[pageid]['pubdate'] = response['query']['pages'][pageid]['categories'][0]['timestamp']
		
		if 'query-continue' in response:
			params['clcontinue'] = response['query-continue']['categories']['clcontinue']
		else:
			break

def getContents(pages_dict):
	print 'Getting contents...'
	params = {
		'action' : 'query',
		'pageids': '|'.join(pages_dict.iterkeys()),
		'prop'   : 'flagged'
		}
	response = query.GetData(params, site)
	
	for pageid, page_data in pages_dict.iteritems():
		stable_revid = response['query']['pages'][pageid].get('flagged', {}).get('stable_revid', -1)
		
		if stable_revid > 0:
			content = getContent(pageid, stable_revid)
			content = content.replace(content.splitlines()[0] + '\n', '')
			page_data['content'] = content
			
			content = getPlainContent(pageid, stable_revid)
			content = content.replace(content.splitlines()[0] + '\n', '')
			page_data['content plain'] = content
	
def updateRSS(pages_dict):
	"""Updates RSS feed at [[Викиновости:RSS-поток]]"""
	print 'Updating RSS'
	
	root = etree.XML('<?xml version="1.0"?>\n<rss version="2.0">\n</rss>')
	tree = etree.ElementTree(root)
	rss = tree.getroot()

	channel = etree.SubElement(rss, 'channel')
	title =  etree.SubElement(channel, 'title')
	title.text = u'Русские Викиновости'
	link =  etree.SubElement(channel, 'link')
	link.text = u'http://ru.wikinews.org/'
	description =  etree.SubElement(channel, 'description')
	description.text = u'Свободный источник новостей'
	image = etree.SubElement(channel, 'image')
	image_url = etree.SubElement(image, 'url')
	image_url.text = 'http://upload.wikimedia.org/wikipedia/commons/thumb/2/24/Wikinews-logo.svg/100px-Wikinews-logo.svg.png'
	image_title = etree.SubElement(image, 'title')
	image_title.text = u'Русские Викиновости'
	image_link = etree.SubElement(image, 'link')
	image_link.text = 'http://ru.wikinews.org/'
	image_width = etree.SubElement(image, 'width')
	image_width.text = '100'
	lang = etree.SubElement(channel, 'language')
	lang.text = 'ru'
	copy = etree.SubElement(channel, 'copyright')
	copy.text = u'Русские Викиновости — Russian Wikinews.\nТекст доступен по лицензии <a href="http://ru.wikisource.org/wiki/Creative_Commons_Attribution_2.5_Generic">Creative Commons Attribution 2.5 Generic</a>.\nАвторские права на медиафайлы могут отличаться.'
#	builddate = etree.SubElement(channel, 'lastBuildDate')
#	builddate.text = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
	
	for pageid in sorted(latest_news.keys(), reverse=True):
		page_data = latest_news[pageid]
		# check if there is a stable revision
		if 'content' in page_data:
			item = etree.Element('item')
			
			title = etree.SubElement(item, 'title')
			title.text = page_data['title']
			
			guid = etree.SubElement(item, 'guid')
			guid.set('isPermaLink', 'true')
			guid.text = u'http://ru.wikinews.org/w/index.php?curid=' + pageid
			
			link = etree.SubElement(item, 'link')
			link.text = u'http://ru.wikinews.org/w/index.php?curid=' + pageid + u'&stable=1'
			
			pdalink = etree.SubElement(item, 'pdalink')
			pdalink.text = u'http://ru.m.wikinews.org/w/index.php?curid=' + pageid + u'&stable=1'
			
			author = etree.SubElement(item, 'author')
			author.text = page_data['author']
			
			if page_data['category'] != None:
				category = etree.SubElement(item, 'category')
				category.text = page_data['category']
			
			descr = etree.SubElement(item, 'description')
			descr.text = page_data['content']
			
			for image in page_data['images']:
				enclosure = etree.SubElement(item, 'enclosure')
				enclosure.set('url', image[0])
				enclosure.set('type', image[1])
			
			publish_time = iso8601.parse_date(page_data['pubdate'])
			pubDate = etree.SubElement(item, 'pubDate')
			pubDate.text = publish_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
			
			comments = etree.SubElement(item, 'comments')
			comments.text = u'http://ru.wikinews.org/wiki/' + urllib.quote('Комментарии:' + page_data['title'].encode('utf8'))
					
			channel.append(item)
	
#	tree.write('/home/artem/public_html/ru_wikinews_rss.xml', encoding='UTF-8', xml_declaration=True, pretty_print=True)

	rss_page = wikipedia.Page(site, u'Викиновости:RSS-поток')
	rss_page.put('<?xml version="1.0"?>\n' + etree.tostring(tree, pretty_print=True, encoding=unicode), comment=u'Автоматическое обновление RSS-ленты', sysop=True)

def pingFeedBurner():
	"""pinging FeedBurner"""
	print 'Pinging FeedBurner...'
	urllib.urlopen('http://feedburner.google.com/fb/a/pingSubmit?bloglink=http%3A%2F%2Ffeeds.feedburner.com%2FRuWikinewsLatestNews%3Fformat%3Dxml')

def updateTemplate(latest_news):
	"""Update template [[Шаблон:Актуальные Викиновости/Список]] at ru.wp"""
	print 'Updating Template'
	
	content = u"<noinclude><div style=\"color:#000; background-color:#faa; font-size:120%;\">Этот список обновляется автоматически ботом {{u|AKBot}}. Пожалуйста, во избежание дублирования не добавляйте сюда ссылки вручную!</div></noinclude>\n"
	for pageid in sorted(latest_news.keys(), reverse=True)[0:12]:
		content += u"* {{news|" + latest_news[pageid]['title'] + u"}}\n"

	wpsite = wikipedia.getSite('ru', 'wikipedia')
	page = wikipedia.Page(wpsite, u'Шаблон:Актуальные Викиновости/Список')
	
	page.put(content, comment=u'Автоматическое обновление списка актуальных викиновостей')

def Purge(titles):
	"""Purge the server cache for the given pipe-separated page titles.

	Bug fix: the original built the request parameters but never sent
	them, so the purge was a silent no-op. The redundant local
	'import query' is also dropped (it is imported at module level).
	"""
	params = {'action': 'purge', 'titles': titles}
	query.GetData(params, site)

# --- Main script flow ---

# Default site from the pywikipedia user config — presumably ru.wikinews
# (all URLs above point there); shared as a global by the helpers above.
site = wikipedia.getSite()

# Collect published, non-blacklisted articles from the last 5 days.
latest_news = getLatestNews(5)

# Enrich each entry with title, author, category, images, pubdate, content.
getData(latest_news)

updateRSS(latest_news)
pingFeedBurner()

updateTemplate(latest_news)

# Purge the front pages and today's date category so changes show up.
date = getDate(0)
Purge(u'Заглавная страница|Викиновости:Новости за последние 45 дней|Категория:' + str(date.day) + ' ' + MonthName[date.month-1])
