from hashlib import md5
from rsspackrat.main import settings
from rsspackrat.main.event import notify, EVT_JOB_STARTED, EVT_JOB_FINISHED, \
    EVT_JOB_SKIPPED, EVT_JOB_FAILED, EVT_WORK_FINISHED, EVT_RECEIVED_FEED_INFO, \
    EVT_NEW_UNREAD_ITEM
from rsspackrat.main.log import logger
from rsspackrat.main.storage import database, UNREAD
from rsspackrat.main.util import normalize_string
from threading import Thread
import time
import feedparser
import lxml.html
import lxml.html.soupparser
import metakit
import os
import random
import urllib


class RSSFeedNotFound(Exception):
    """Raised when no RSS feed link can be located for a page."""


def get_feed_info(url):
    """Fetch the info for the feed at *url* on a background thread.

    The result is broadcast via an ``EVT_RECEIVED_FEED_INFO`` event
    instead of being returned, so callers stay non-blocking.
    """

    def worker():
        notify(EVT_RECEIVED_FEED_INFO, extract_feed_info(url))

    Thread(target=worker).start()


def notimeout(func):
    """Decorator: retry *func* forever until it returns without raising.

    Each failure is logged and retried after a 2-second pause.  Meant
    for network operations that may hit transient errors (timeouts,
    DNS hiccups, ...).
    """
    from functools import wraps

    @wraps(func)  # preserve func's __name__/__doc__ on the wrapper
    def inner(*args, **kw):
        while True:
            try:
                return func(*args, **kw)
            # Exception (not a bare except) so KeyboardInterrupt and
            # SystemExit can still interrupt the retry loop.
            except Exception:
                logger.warning("Exception while executing %s with %s, %s" % (func.__name__, args, kw))
                time.sleep(2)
    return inner

@notimeout
def extract_feed_info(url):
    """Return ``(title, feedurl)`` for the feed reachable from *url*.

    If *url* serves HTML rather than a feed, follow the first
    ``<link type="application/rss+xml">`` it declares and recurse.

    Raises :class:`RSSFeedNotFound` when an HTML page declares no
    feed link at all.
    """

    logger.info(u"Getting info for " + url)
    page = urllib.urlopen(url)

    if 'xml' not in page.info()['content-type']:
        # HTML page: locate the advertised feed link and recurse on it.
        doc = lxml.html.soupparser.parse(page)
        candidates = doc.xpath("//link[@type='application/rss+xml']")
        if not candidates:
            raise RSSFeedNotFound
        #XXX: maybe offer a choice?
        return extract_feed_info(candidates[0].get('href'))

    # Page is already a feed: parse it and pull out the title.
    feed = feedparser.parse(page)
    try:
        title = feed.channel.title
    except AttributeError:
        logger.warning(u"Could not find a valid title for feed at %s" % url)
        title = url
    return (title, url)
    

def process_all_feeds():
    """Kick off background processing of every feed in the database."""

    def worker():
        for feed_row in database.get_feeds():
            notify(EVT_JOB_STARTED, feed_row.url)
            process_feed(feed_row)
        notify(EVT_WORK_FINISHED)

    Thread(target=worker).start()

    
def process_one_feed(feedinfo):
    """Kick off background processing of a single feed."""

    def worker():
        notify(EVT_JOB_STARTED, feedinfo.url)
        process_feed(feedinfo)
        notify(EVT_WORK_FINISHED)

    Thread(target=worker).start()

@notimeout
def process_feed(feedinfo):
    """Download the feed at ``feedinfo.url`` and process every entry."""

    response = urllib.urlopen(feedinfo.url)
    parsed = feedparser.parse(response)
    for item in parsed['entries']:
        process_entry(feedinfo, item)
        
            
def process_entry(feedinfo, entry):
    """Take a feedparser generated entry, extract and archive the images.

    Entries already archived (by guid) are skipped; entries with no
    extractable content are logged and skipped.
    """
    try:
        guid = entry.guid
    except AttributeError:
        # Some feeds/archive rows have no guid; fall back to the link.
        guid = entry.link
    logger.debug(u"Processing entries at guid %s" % guid)

    archive = database.get_archived_by_guid(guid)
    if archive is not None:
        logger.info(u"Skipping entries in %s, already archived" % guid)
        return

    summary = get_entry_content(entry)
    if summary is None:
        logger.warning(u"Could not find content for entry %s" % guid)
        return

    etree = lxml.html.soupparser.fromstring(summary)
    imgs = etree.xpath('//img')
    for e in imgs:
        url = e.get('src')
        # src may be missing (None) or relative; archive absolute urls only.
        if not url or not url.startswith('http'):
            logger.warning(u"Skipping invalid image url %s" % url)
            continue
        archive_image(feedinfo, url, guid)


def get_entry_content(entry):
    """parse the feedparser generated entry to find out which is the content
    of the feed

    The problem is complicated by the fact that this function should work
    with multiple types of feeds (and feedparser doesn't return a uniform interface
    and also support akregator metakit archives.
    """
    if not len(dir(entry)):
        # Metakit archive row: only a description attribute, if anything.
        if not hasattr(entry, 'description'):
            logger.warning("This metakit archive entry doesn't have a description")
            return None
        return entry.description

    if 'content' in entry:  #XXX: should check for rss format...
        return entry['content'][0]['value']
    if 'summary_detail' in entry:
        return entry['summary_detail']['value']
    if hasattr(entry, 'description'):   #akregator metakit archive entry
        return entry.description
    return None


def get_archive_dir(feedinfo):
    """Return the archive directory for *feedinfo*, creating it if needed.

    The directory lives under the configured ``archive_location`` and is
    named after the normalized feed title.  The returned path always
    ends with the OS path separator.
    """
    title = feedinfo.title
    archive_dir = settings.load()['archive_location']
    # os.path.join already inserts the separator; appending one to
    # archive_dir first (as before) was redundant.
    location = os.path.join(archive_dir, normalize_string(title))
    if not os.path.exists(location):
        os.makedirs(location)
    return location + os.path.sep


@notimeout
def archive_image(feedinfo, url, guid):
    """Download the image at *url* and archive it for *feedinfo*.

    Deduplicates three ways — by url, by content hash, and by
    destination filename — and records every archived file in the
    database under *guid*.  Progress is reported via job events.
    """
    logger.debug(u"Processing image url " + url)
    archive = database.get_archived_by_url(url) #check if archived by url
    if archive is not None:
        logger.debug(u"Skipping %s, already downloaded (detected by url)" % url)
        archive.guid = guid
        database.save_changes()
        notify(EVT_JOB_SKIPPED, archive.location)
        return

    logger.info(u"Retrieving " + url)
    dest = get_archive_dir(feedinfo)
    req = urllib.urlopen(url)
    if req.info().subtype not in ('jpeg', 'jpg', 'png', 'gif', 'bmp'):
        # Not an image (likely an HTML error page): report the failure
        # and bail out instead of archiving garbage.
        notify(EVT_JOB_FAILED, url)
        return
    content = req.read()

    hash = md5(content).hexdigest()

    archive = database.get_archived_by_uid(hash)    #check if archived by hash
    if archive is not None:
        logger.debug(u"Skipping %s, already downloaded (detected by hash)" % url)
        #XXX: I should also update url here, important
        archive.guid = guid
        database.save_changes()
        notify(EVT_JOB_SKIPPED, archive.location)
        return

    if url.endswith('/'):
        # No usable filename in the url: derive one from the hash.
        fname = hash + '.' + req.info().subtype
    else:
        fname = normalize_string(url.split('/')[ - 1])

    fpath = os.path.join(dest, fname)
    logger.debug(u"Saving image " + fpath)

    if os.path.exists(fpath):
        logger.debug(u"Skipping %s, file with this name already exists" % url)
        notify(EVT_JOB_SKIPPED, fpath)
        database.archive_file(uid=hash, location=fpath, url=url,
                              feedurl=feedinfo.url, guid=guid)
        return

    # Binary mode: 'w' would corrupt image bytes on platforms that
    # translate line endings.  Close even if the write fails.
    f = open(fpath, 'wb')
    try:
        f.write(content)
    finally:
        f.close()
    notify(EVT_JOB_FINISHED, fpath)
    notify(EVT_NEW_UNREAD_ITEM, feedinfo)
    database.archive_file(uid=hash, location=fpath, url=url,
                          feedurl=feedinfo.url, guid=guid)
    
    
def import_akregator_archive(feedinfo, path):
    """Process an akregator metakit archive file on a background thread."""

    def worker():
        store = metakit.storage(open(path))
        rows = store.getas(store.description())
        for row in rows:
            process_entry(feedinfo, row)
        notify(EVT_WORK_FINISHED)

    Thread(target=worker).start()
        
    
class SourceNavigator(object):
    """Cursor over the archived files of a single feed.

    Every navigation method returns the location of the file the
    cursor lands on, or None when the archive is empty or the move
    would fall outside it.
    """

    def __init__(self, feedinfo, status=UNREAD):
        self.feedinfo = feedinfo
        self.position = -1
        self.files = database.get_feed_archive(feedinfo.url, status)

    def _location_at(self, index):
        # Move the cursor to *index* and report where it points.
        self.position = index
        return self.files[index].location

    def first(self):
        if self.files:
            return self._location_at(0)

    def last(self):
        if self.files:
            self.position = len(self.files) - 1
            return self.files[-1].location

    def next(self):
        if self.files and self.position + 1 < len(self.files):
            return self._location_at(self.position + 1)

    def previous(self):
        if self.files and self.position - 1 >= 0:
            return self._location_at(self.position - 1)

    def random(self):
        if self.files:
            return self._location_at(random.randint(0, len(self.files) - 1))

    def goto(self, position):
        if self.files:
            return self._location_at(position)

    def current(self):
        if self.files:
            return self.files[self.position].location
        
#        s = StringIO(); s.write(summary); s.seek(0)        
#        entry_title = entry.title   #XXX: will need to use this