import urllib2
import datetime
from datetime import timedelta
import time
from xml.dom import minidom
import xml.etree.ElementTree as etree
import os
import os.path
import sys
import logging

class Tag(object):
    """A single Last.FM tag: a label plus how often it was applied.

    Both values are stored exactly as supplied by the caller (the count
    arrives from the Last.FM XML as a string).
    """

    def __init__(self, name, count):
        self.name = name    # tag label, e.g. "rock"
        self.count = count  # application count as reported by last.fm

    def __repr__(self):
        # debug-friendly representation; does not affect existing callers
        return "Tag(name=%r, count=%r)" % (self.name, self.count)

    # The attributes are public; these accessors are retained only for
    # backward compatibility with existing callers.
    def get_name(self):
        return self.name

    def get_count(self):
        return self.count

class GrabTags(object):
    """Download, cache, and parse the top tags for an artist (and optional
    album) from the Last.FM web API.

    On construction the tags are fetched (from a local cache file when one
    exists and is younger than MAX_CACHE_AGE, otherwise over HTTP), parsed,
    and exposed through the ``tags`` property as ``{index: Tag}`` in the
    order Last.FM returned them.

    The constructor terminates the process on failure (sys.exit), matching
    the original command-line-tool behavior.
    """

    def __init__(self, logger, artist, album=None):
        """Fetch and parse tags for *artist* (and *album*, if given).

        logger -- a logging.Logger used for all diagnostics
        artist -- artist name; must not be None
        album  -- optional album name for an album-level tag lookup
        """
        self.LOGGER = logger
        self.TIMESTAMP = time.time()
        self.MAX_CACHE_AGE = timedelta(days=30)

        if artist is None:
            self.LOGGER.critical("no artist supplied for tag retrieval")
            sys.exit(2)

        # fetch tags for artist and album (if applicable).
        # BUG FIX: previously a hard-coded "queen" was queried and the
        # album argument was silently ignored.
        raw_tags = self.get_tags(artist, album)
        if raw_tags is False:
            self.LOGGER.critical("tags could not be downloaded from last.fm. exiting.")
            sys.exit(1)

        # parse the resulting xml
        parsed_tags = self.parse_tags(raw_tags['filename'])
        if parsed_tags is False:
            self.LOGGER.info("No genres available for tagging.")
            sys.exit(0)

        # a dict keyed 0..n-1 preserves the order received from last.fm
        # while still allowing random access.
        # BUG FIX: stored under _tags so the public ``tags`` property is
        # not shadowed (the original assigned self.tags, which made the
        # tags() method unreachable).
        self._tags = {
            i: Tag(tag['name'], tag['count'])
            for i, tag in enumerate(parsed_tags.values())
        }

    @property
    def tags(self):
        """Parsed tags as {index: Tag}, ordered as received from Last.FM."""
        return self._tags

    def get_tags(self, artist, album=None, API_KEY="53188bc319825c76787f02c8f50bafcc"):
        """Build the request URL and cache filename, then fetch the tag XML.

        Returns {'filename': cache_path, 'xml': xml_text} on success, or
        False when nothing could be downloaded.
        """
        # local import: keeps this block self-contained (the module header
        # still carries a stale py2 "import urllib2")
        import urllib.parse

        self.LOGGER.info("Artist: %s" % artist)
        self.LOGGER.info("Album: %s" % album)
        self.LOGGER.debug("API_KEY: %s" % API_KEY)

        if artist is None:
            # original code built an empty URL here, which always failed
            # inside download(); fail fast with the same net result.
            return False

        # URL-encode the query values so artists/albums containing spaces
        # or punctuation produce a valid request; the cache filename keeps
        # the raw names, as before.
        quoted_artist = urllib.parse.quote_plus(artist)
        if album is not None:
            cache = "%(artist)s.%(album)s.cache" % \
                {
                    'artist': artist,
                    'album': album,
                }
            url = "http://ws.audioscrobbler.com/2.0/?method=album.gettoptags&artist=%(artist)s&album=%(album)s&API_KEY=%(API_KEY)s" % \
                {
                    "artist": quoted_artist,
                    "album": urllib.parse.quote_plus(album),
                    "API_KEY": API_KEY
                }
        else:
            cache = "%(artist)s.cache" % \
                {
                    'artist': artist,
                }
            url = "http://ws.audioscrobbler.com/2.0/?method=artist.gettoptags&artist=%(artist)s&API_KEY=%(API_KEY)s" % \
                {
                    "artist": quoted_artist,
                    "API_KEY": API_KEY
                }

        self.LOGGER.debug("cache file: %s" % cache)
        self.LOGGER.debug("api url: %s" % url)

        # .lower() also turns the API_KEY query-parameter name into the
        # lowercase "api_key" that the last.fm API expects
        xml = self.download(url.lower(), cache)

        if xml is False:
            return False
        return {'filename': cache, 'xml': xml}

    def parse_tags(self, filename, top=30):
        """Parse up to *top* <tag> entries from the cached XML file.

        filename -- path to a Last.FM gettoptags XML document
        top      -- maximum number of tags to keep; 0 means "up to 256"

        Returns {1-based index: {'number', 'name', 'count'}} or False when
        the document contains no tags.
        """
        main_tree = etree.parse(filename)
        toptags = main_tree.find("toptags")
        tags = toptags.findall("tag")
        self.LOGGER.debug("tags found")

        if top == 0:
            top = 256
        self.LOGGER.info("parsing top %s tags..." % top)

        tag_items = {}
        for i, node in enumerate(tags[:top]):
            tag = {
                'number': i + 1,
                'name': node.find("name").text,
                'count': node.find("count").text,
            }
            self.LOGGER.debug("tag built: %s" % tag)
            tag_items[i + 1] = tag

        if tag_items:
            self.LOGGER.info("%s tags parsed successfully." % len(tag_items))
            return tag_items
        self.LOGGER.warning("No tags could be parsed. Could be a download problem or, more likely, the album/artist information retrieved from Last.FM did not contain any tags.")
        return False

    def download(self, url, filename):
        """Return the tag XML as text, from cache when fresh, else from last.fm.

        A fresh download is decoded as UTF-8 and written to *filename* so
        cache hits and fresh downloads both return str. Returns False on
        any network or file error.
        """
        # local imports: the module-level "import urllib2" is a py2
        # leftover and does not provide urllib.request
        import urllib.request
        import urllib.error

        xml = self.check_cache(filename)

        if xml is False or xml == "":
            self.LOGGER.info("attempting tag download...")
            try:
                with urllib.request.urlopen(url) as response:
                    # BUG FIX: decode so the return type matches the str
                    # that check_cache() returns on a cache hit
                    xml = response.read().decode("utf-8")
            except (urllib.error.URLError, ValueError, UnicodeDecodeError):
                self.LOGGER.error("the url opener failed to initialise.")
                return False

            self.LOGGER.debug("attempting to open cache file...")
            try:
                with open(filename, 'w', encoding='utf-8') as out_file:
                    # dump requested xml to the cache file
                    self.LOGGER.debug("writing tags to cache file...")
                    out_file.write(xml)
            except OSError:
                self.LOGGER.error("the cache file could not be opened.")
                return False

        self.LOGGER.info("tag download complete.")
        return xml

    def check_cache(self, filename):
        """Return cached XML text when *filename* exists and is fresh.

        Stale, empty, or unreadable caches are deleted. Returns False
        whenever a fresh download is required.
        """
        if not os.path.exists(filename):
            self.LOGGER.debug("no cache file found. downloading a fresh copy.")
            return False

        # BUG FIX: age is now-minus-mtime; the original computed
        # mtime-minus-now, which is negative and made every cache look
        # fresh forever.
        age = timedelta(seconds=(self.TIMESTAMP - os.path.getmtime(filename)))
        if age >= self.MAX_CACHE_AGE:
            self.LOGGER.debug("existing cache found, but it's old. deleteing cache and downloading a fresh one")
            os.remove(filename)
            return False

        self.LOGGER.debug("existing cache found, and its still fresh. reading xml from it.")
        try:
            with open(filename, 'r', encoding='utf-8') as in_file:
                xml = in_file.read()
        except OSError:
            self.LOGGER.warning("couldn't read from existing cache. deleteing it and downloading a fresh copy")
            os.remove(filename)
            return False

        if xml == "":
            self.LOGGER.warning("couldn't read from existing cache. deleteing it and downloading a fresh copy")
            os.remove(filename)
            return False

        self.LOGGER.info("tags read from existing cache.")
        return xml


