# AniDB API version; always a float
VERSION = 0.1

from whoosh.fields import Schema, TEXT, NUMERIC, KEYWORD
from whoosh.index import open_dir, create_in, EmptyIndexError
from whoosh.qparser import QueryParser
from whoosh.filedb.multiproc import MultiSegmentWriter
from multiprocessing import cpu_count
from threading import Lock
from time import time, sleep
import os.path
import logging
import config
import tools
try:
    from xml.etree import cElementTree as ElementTree
except (ImportError):
    from xml.etree import ElementTree

class SearchInvalid(Exception):
    """Signals a problem with the search engine or its index."""
class ParseError(Exception):
    """Signals a failure while parsing the anidb XML file."""
class Search(object):
    """Small subset to search through the whoops database that contains all
    the anime titles known to anidb"""
    _parser_method, _query_parser, _search_method, _index = (None, None, None, None)
    _index_directory = config.data_dir("search_index")
    try:
        _cpu_count = cpu_count()
    except (NotImplementedError):
        _cpu_count = 1
    def load_index(self, clear=False):
        try:
            self._index = open_dir(self._index_directory)
        except (EmptyIndexError):
            clear = True
        if (not os.path.exists(self._index_directory)) or (clear):
            if (not os.path.exists(self._index_directory)):
                os.mkdir(self._index_directory)
            schema = Schema(title=TEXT(stored=True),
                            kind=NUMERIC(stored=True),
                            language=KEYWORD(stored=True),
                            id=NUMERIC(stored=True))
            self._index = create_in(self._index_directory, schema)
        else:
            try:
                self._index = open_dir(self._index_directory)
            except (EmptyIndexError):
                raise SearchInvalid("Corrupt search index")
        self._query_parser = QueryParser("title", self._index.schema)
        self._parser_method = self._query_parser.parse
        self._searcher = self._index.searcher()
        self._search_method = self._searcher.search
    def update_search(self):
        self.load_index(clear=True) # clearing index
        writer = MultiSegmentWriter(self._index, procs=self._cpu_count)
        try:
            import gzip
            f = gzip.GzipFile(filename="F:/animetitles.dat.gz")
        except (IOError):
            f = tools.urlopen("http://anidb.net/api/animetitles.dat.gz")
        try:
            for line in f:
                """id|type|language|title"""
                if (line.strip()[:1] != "#"):
                    line = line.decode("utf-8", "replace")
                    line = line.split(u"|")
                    writer.add_document(id=line[0],
                                        kind=line[1],
                                        language=line[2],
                                        title=line[3].strip())
        except:
            logging.exception("Error occured")
            writer.commit()
        else:
            writer.commit()
            print "Complete"
        f.close()
    def search(self, query):
        """
        Search the local aniDB title database for a match
        
        Uses the default whoosh query language, possible columns are
        
        language: The language of the title
        id: The ID anidb gave to this title
        kind: What kind of title this is where:
            1 = Primary title (One per anime)
            2 = Synonyms (Multiple per anime)
            3 = Short titles (Multiple per anime)
            4 = Official title (One per language)
        title: The anime title, database contains several languages
        """
        if (not self._index):
            try:
                self.load_index()
            except (SearchInvalid):
                raise
        if (not self._parser_method) or (not self._search_method):
            raise SearchInvalid("You need to call update_search first")
        if (not isinstance(query, unicode)):
            raise SearchInvalid("Requires unicode string")
        return self._search_method(self._parser_method(query))
        
class API(object):
    """
    a class that handles all data handling with the anidb API.
    This includes caching, saving to disk, fetching new entries,
    fetching from disk and other related things.

    Due to API restrictions all calls to the API are at the least
    2 seconds apart, and are only done once a day. This is handled by
    this class and can't be disabled by the user. Calls more often than
    2 seconds apart are BLOCKING and won't return until it is over. The
    blocking only counts if it requires access to the API.

    Disk caching is on a 3 day refresh timer, meaning it will refresh
    the data when the info is older than 3 days. This is subject to change

    TO DO:
        Write to disk
        Read from disk
        Read from API
        Event handlers
        Caching

    """
    # disk cache timeout in days
    _disk_cache_timeout = 3
    # Fileobj cache
    _file_cache = {}
    # in-memory cache of parsed results, keyed by anime id
    _cache = {}
    # xml trees that failed to persist; kept so they can be retried
    _disk_queue = {}
    # errors collected by _open_cache_tar as (kind, errno, strerror) tuples
    _errors = []
    # registered version with api
    _api_clientver = 1
    _api_client = "amazingweeaboo"
    _api_protover = 1

    # timestamp of the last API hit; seeded so the first call is not delayed
    _timeout = time() - 2.0
    _lock = Lock()

    def retrieve(self, aid, version=VERSION):
        """Retrieve the data for the anime ID 'aid',
        this returns an object that can change with each version
        released. Use the version argument to get a specific version
        kind.
        """
        # lock it, the archive doesn't like multiple accesses
        with self._lock:
            # First check cache (Disabled if older version requested)
            if version == VERSION and aid in self._cache:
                return self._cache[aid]
            # Second check disk
            if self._check_disk(aid):
                data = self._read_disk(aid)
                if data != False:
                    xml_tree = self._wrap_xml(data)
                    if not xml_tree:
                        # Parsing error
                        return None
                    # no problems, go parse it
                    return self._cache_result(aid, xml_tree)
            # Third retrieve from anidb, honouring the 2 second rate limit
            elapsed = time() - self._timeout
            if elapsed <= 2.0:
                # sleep only for the REMAINING part of the window
                # (the original slept for the elapsed time instead)
                sleep(2.0 - elapsed)
            API_url = "http://api.anidb.net:9001/httpapi?"\
                        "request=anime"\
                        "&client={client}"\
                        "&clientver={clientver}"\
                        "&protover={proto}&aid={aid}"
            xml_tree = self._wrap_xml(tools.urlopen(API_url\
                                         .format(client=self._api_client,
                                         clientver=self._api_clientver,
                                         proto=self._api_protover,
                                         aid=aid)))
            self._timeout = time()
            if not xml_tree:
                # Parsing error
                return None
            # no problems, go parse it
            return self._cache_result(aid, xml_tree)

    def _cache_result(self, aid, xml_tree):
        """Parse xml_tree into the memory cache, persist the tree to disk
        (queueing it for later when the write fails) and return the parsed
        result. Shared tail of both the disk and the API path of retrieve().
        """
        self._cache[aid] = self.parse_xml(xml_tree)
        if not self._write_disk(xml_tree, aid):
            # Add checking code here really
            self._disk_queue[aid] = xml_tree
        return self._cache[aid]

    @staticmethod
    def parse_xml(xml_tree):
        """Flatten an anidb anime XML tree into a plain dict with the keys
        id, img, episodes, type, start, end, description, categories, tags.
        Elements missing from the tree keep their placeholder defaults.

        NOTE: the original declared this a staticmethod but still took
        'self', which broke every self.parse_xml(tree) call site.
        """
        # Fill with default values
        result = {'img': u"none.png",
                  'episodes': u'?',
                  'type': u'Unknown',
                  'start': u'?',
                  'end': u'?',
                  'description': u'No description found',
                  'categories': [],
                  'tags': []
                  }
        result['id'] = xml_tree.getroot().get('id', u'0')
        # Dictionary to map simple tree.find(value).text to
        regulars = {
                    'img': 'picture',
                    'episodes': 'episodecount',
                    'type': 'type',
                    'end': 'enddate',
                    'start': 'startdate',
                    'description': 'description',
                    }
        for key, find in regulars.iteritems():
            try:
                result[key] = xml_tree.find(find).text
            except AttributeError:
                # element missing: find() returned None, keep the default
                pass
        try:
            result['categories'] = [element.text for element in \
                                xml_tree.find("categories").iter("name")]
        except AttributeError:
            pass
        try:
            result['tags'] = [element.text for element in \
                          xml_tree.find("tags").iter("name")]
        except AttributeError:
            pass
        return result

    def _check_disk(self, aid):
        """Check the disk cache for the anime ID,
        returns True if it exists and False if it doesn't.

        Should not close the file due to high chance of a
        self._read_disk() coming afterwards.

            TODO:
                check if file still exists handle this case
        """
        # Not caring for cache and stuff currently just return it directly
        return bool(self.anime_data_path(aid))

    @staticmethod
    def anime_data_path(aid, send_path=False):
        """Return the disk-cache file path for anime 'aid'.

        When the file does not exist the path is still returned if
        send_path is True, otherwise None.
        """
        path = os.path.join(config.data_dir("anidb"), "anime" + str(aid))
        if os.path.exists(path) or send_path:
            return path
        # no file and the caller did not ask for the bare path
        return None

    def _read_disk(self, aid):
        """Return a file object with the XML data read from disk, or
        False when the file is missing or unreadable.

            TODO:
                check if file still exists
        """
        import gzip

        filename = self.anime_data_path(aid)
        if filename is None:
            # Why are we even here???
            return False
        try:
            fileobj = gzip.GzipFile(filename=filename)
        except IOError:
            logging.exception("Failed opening disk file")
            return False
        # keep the handle so _write_disk knows the file is already cached
        self._file_cache[aid] = fileobj
        return fileobj

    def _write_disk(self, xmltree, aid):
        """Write the xmltree to the gzipped disk-cache file; returns True
        on success and False on failure."""
        import gzip
        if aid in self._file_cache:
            # File exists, nothing to do. Do NOT open it in "w" mode like
            # the original did: that truncated the cached copy and leaked
            # the handle.
            return True
        try:
            fileobj = gzip.GzipFile(filename=self.anime_data_path(aid,
                                                            send_path=True),
                                    mode="w")
        except IOError:
            logging.exception("Problem opening file for writing")
            return False
        try:
            xmltree.write(fileobj, encoding="utf-8")
        except IOError:
            logging.exception("Problem writing anidb xml to file")
            return False
        else:
            return True
        finally:
            fileobj.close()

    @staticmethod
    def _wrap_xml(xml_file):
        """Wrap a XML file into a ElementTree object; returns False on a
        parse error and None on any other failure."""
        try:
            return ElementTree.parse(xml_file)
        except ElementTree.ParseError:
            return False
        except Exception:
            logging.exception("Unexpected error wrapping XML file")

    def _open_cache_tar(self, mode="r"):
        """Open the gzipped tar disk cache for reading or writing.

        Raises tarfile.TarError for unsupported modes; IO failures are
        recorded in self._errors instead of raising.
        """
        import tarfile
        if mode not in ["r", "w"]:
            raise tarfile.TarError("Unsupported mode")
        try:
            return tarfile.open(config.data_file("anidb.tar.gz"),
                                    mode + ":gz", format=tarfile.PAX_FORMAT,
                                    encoding="utf-8")
        except IOError as err:
            # collect the failure; _errors is now a class attribute (the
            # original appended to self._errors without ever defining it,
            # so this path raised AttributeError)
            self._errors.append(("IO", err.errno, err.strerror))
        except Exception:
            logging.exception("Unexpected error opening disk cache")