﻿# -*- coding: utf-8 -*-

import sys
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
import urllib2, re
from bs4 import BeautifulSoup

# plugin handle
# Kodi invokes the plugin as: script-url, handle, query-string (sys.argv[0..2]).
handle = int(sys.argv[1])
# Query-string key used by the router at the bottom of the file.
PARAMETER_KEY_MODE = "mode"
addon = xbmcaddon.Addon('script.tportal.hr')
''' Define home screen categories '''
# Each entry is [display label, m.tportal.hr path segment].
# The path segment doubles as the "mode" value in generated plugin URLs.
HOME_CATEGORIES = [
			['Vijesti', 'vijesti'],
			['Sport', 'sport'],
			['Biznis', 'biznis'],
			['Sci/Tech', 'scitech'],
			['Komentari', 'komentari'],
			['Lifestyle', 'lifestyle'],
			['Showtime', 'showtime'],
			['Kultura', 'kultura'],
			['Gamereport', 'gamereport'],
			['Funbox', 'funbox'],
			['Supermen', 'supermen'],
			['Plan B', 'planb'],
			['Teen', 'teen'],
]


''' Window for reading article '''
def readArticle(article_id):
	# Fetch the article identified by its site path and present it in the
	# skin's custom ViewArticle window.
	article = getArticle('http://m.tportal.hr/' + article_id)
	view_article_window = xbmcgui.WindowXML('ViewArticle.xml', addon.getAddonInfo('path'))
	# Window property name -> index into the list returned by getArticle().
	# Index 1 (the lead/description) is not shown in this window.
	property_slots = [
		('article_headline', 0),
		('article_published', 2),
		('article_author', 3),
		('article_photographer', 4),
		('article_image', 5),
		('article_text', 6),
	]
	for prop_name, slot in property_slots:
		view_article_window.setProperty(prop_name, article[slot])
	view_article_window.doModal()

''' Get webpage contents by url as argument '''
def getWebpage(url):
	# Download the page at `url` and return the raw response body.
	# A browser User-Agent header is sent; presumably the site serves
	# different markup to unknown clients.
	req = urllib2.Request(url)
	req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3 Gecko/2008092417 Firefox/3.0.3')
	response = urllib2.urlopen(req)
	try:
		# try/finally guarantees the connection is released even if
		# read() raises (the original leaked the response in that case).
		return response.read()
	finally:
		response.close()

''' Scrape contents of webpage and populate list with each item of contents '''
def getArticle(url):
	# Parse one article page of m.tportal.hr and return its parts as a list:
	# [headline, lead/description, published timestamp, author, photographer,
	#  absolute image URL, plain article text].
	soup = BeautifulSoup(getWebpage(url))
	article_headline = soup.find('div', 'clanak').h3.getText()
	article_description = soup.find('p', 'articleLead').getText()
	article_published = soup.find('p', class_ = 'ts').getText()
	article_author = soup.find('p', 'autor').getText()
	article_photographer = soup.find('p', 'foto').getText()
	article_image = 'http://m.tportal.hr' + soup.find('img', class_ = 'leadImg')['src']
	article_text = soup.find('div', 'articleText')
	# Remove every embedded 'vvinner' promo/link anchor before extracting the
	# text. The original loop iterated range(len(article_text)) — i.e. the
	# number of *direct children* — removing one anchor per pass, which was
	# O(n^2) and could leave anchors behind if more of them were nested
	# deeper than the child count allowed passes for.
	for promo_anchor in article_text.find_all('a', class_ = 'vvinner'):
		promo_anchor.decompose()
	article_text = article_text.getText() + '\r\n\r\n'
	return [
		article_headline,
		article_description,
		article_published,
		article_author,
		article_photographer,
		article_image,
		article_text,
	]
	
''' Convert parameters encoded in a URL to a dict. '''
def parametersStringToDict(parameters):
    # Parse a Kodi plugin query string (e.g. "?mode=vijesti&x=1") into a dict.
    # The leading "?" is skipped; pairs without any "=" are ignored.
    paramDict = {}
    if parameters:
        for paramsPair in parameters[1:].split("&"):
            # Split on the first "=" only, so values that themselves contain
            # "=" are kept (the old split('=') silently dropped such pairs).
            paramSplits = paramsPair.split('=', 1)
            if len(paramSplits) == 2:
                paramDict[paramSplits[0]] = paramSplits[1]
    return paramDict

''' Populate first category menu '''
def populateCategories(categories):
	# Add one folder item per [label, mode-key] pair, then close the listing.
	# Iterates pairs directly instead of the old index-based range(len(...)).
	for label, mode_key in categories:
		listItem = xbmcgui.ListItem(label)
		url = sys.argv[0] + '?' + 'mode=%s' % mode_key
		xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=listItem, isFolder=True)
	xbmcplugin.endOfDirectory(handle)

''' Populate article headlines menu '''
def articleHeadlinesMenu(article_headlines):
	# Add one leaf item per [headline, article id, image url] triple, then
	# close the listing. Iterates triples directly instead of range(len(...)).
	for headline, article_id, img_link in article_headlines:
		listItem = xbmcgui.ListItem(headline, iconImage = img_link)
		url = sys.argv[0] + '?' + 'mode=%s' % article_id
		xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=listItem, isFolder=False)
	xbmcplugin.endOfDirectory(handle)

''' Scrape article headlines '''
def scrapeHeadlines(categorie):
	# Return [[headline, article href, absolute image url], ...] for one
	# category page; the first entry is the page's main news item.
	if categorie == 'komentari':
		url = 'http://m.tportal.hr/' + categorie
	else:
		url = 'http://m.tportal.hr/' + categorie + '?tab=1'
	soup = BeautifulSoup(getWebpage(url))

	def _headline_text(tag):
		# Headline tags may carry a decorating <span>; drop it before
		# reading the text. (Shared by the main item and the list items —
		# previously duplicated inline.)
		if tag('span'):
			tag.span.decompose()
		return tag.getText()

	mainnews_raw = soup('div', class_= 'mainnews')
	link_id = mainnews_raw[0].a['href']
	# The 'komentari' page keeps the image URL in 'src'; other categories
	# store it in the 'data-srcfull' attribute.
	if categorie == 'komentari':
		img_link = 'http://m.tportal.hr' + mainnews_raw[0].img['src']
	else:
		img_link = 'http://m.tportal.hr' + mainnews_raw[0].img['data-srcfull']
	headlines = [[_headline_text(mainnews_raw[0].h2), link_id, img_link]]
	# Remaining headlines on the page; iterate tags directly instead of the
	# old index-based range(len(...)) loop.
	for item in soup('div', class_= 'vijest wpic'):
		headlines.append([
			_headline_text(item.h3),
			item.a['href'],
			'http://m.tportal.hr' + item.img['data-srcfull'],
		])
	return headlines

''' Main program '''
if __name__ == '__main__':

	params = parametersStringToDict(sys.argv[2])
	mode = params.get(PARAMETER_KEY_MODE, "0")

	# Route: no query string -> home category menu; a known category key ->
	# that category's headline list; anything else is treated as an article id.
	# Valid keys are derived from HOME_CATEGORIES so the router cannot drift
	# out of sync with the menu (the previous hard-coded tuple duplicated the
	# same 13 values).
	category_keys = set(entry[1] for entry in HOME_CATEGORIES)

	if not sys.argv[2]:
		populateCategories(HOME_CATEGORIES)
	elif mode in category_keys:
		articleHeadlinesMenu(scrapeHeadlines(mode))
	else:
		readArticle(mode)
