# Copyright 2008 Thomas Quemard
#
# Grabova is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3.0, or (at your option)
# any later version.
#
# Grabova is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.

import httplib
import re
import urlparse
import xml.dom.minidom

class BadSourceString (Exception):
    # Raised by import_source() when a source string does not split into
    # the expected (scheme, host, path) parts.
    pass

class Source:
    """Base class for feed sources.

    Subclasses override export_source / get_items / import_source; the
    shared parse_source() splits a "scheme://host[/path]" source string.
    """

    # Human-readable source name, set by subclasses.
    name = ""

    def export_source(self):
        """Serialize this source back to a source string (subclass hook)."""
        return ""

    def get_items(self):
        """Return a list of item dicts from the source (subclass hook)."""
        return []

    def import_source(self, src=None):
        """Configure this source from a source string (subclass hook).

        The src parameter defaults to None for backward compatibility with
        callers of the old zero-argument form; subclasses (e.g. Mininova)
        require it.
        """
        pass

    def parse_source(self, src):
        """Split src of the form "scheme://host[/path]" into its parts.

        Returns [scheme, host, path] on a match, or [] when src does not
        match at all.  An absent optional path comes back as the string
        "1" because of the groups(1) default below.
        """
        match = re.match(r"([a-z]+)://([^/]+)(?:/(.+))?", src)
        if match is None:
            return []
        # groups(1) substitutes the integer 1 for the unmatched optional
        # path group, which str() turns into "1".
        # NOTE(review): groups("") was probably intended; kept as-is so
        # existing len() == 3 checks in callers keep their behaviour.
        return [str(part) for part in match.groups(1)]


class XmlSource (Source):
    """Source mixin that parses raw XML payloads into DOM documents."""

    def parse_xml_to_dom(self, xml_str):
        """Parse xml_str and return the DOM document, or None on bad XML.

        Malformed feed data is an expected condition; callers treat a
        None return as "no feed".
        """
        try:
            return xml.dom.minidom.parseString(xml_str)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit are no longer silently swallowed.
            return None


class Mininova (XmlSource):
    """Source backed by the www.mininova.org RSS feeds."""

    host = "www.mininova.org"
    url_type = ""  # kind of feed, e.g. "category"
    url_var = ""   # feed variable, e.g. the category id for rss.xml?sub=

    def __init__(self):
        self.name = "Mininova"

    def import_source(self, src):
        """Configure this source from a "mininova://<type>/<var>" string.

        Raises BadSourceString when src does not split into exactly three
        parts (scheme, type, variable).
        """
        url = self.parse_source(src)
        if len(url) != 3:
            raise BadSourceString()
        self.url_type = url[1]
        self.url_var = url[2]

    def export_source(self):
        """Serialize back to a source string (inverse of import_source)."""
        src = "mininova://"
        if self.url_type == "category":
            src += "category/" + self.url_var
        return src

    def get_items(self):
        """Fetch and parse the RSS feed; return a list of item dicts.

        Each dict carries "title" (str), "size" (int, bytes from the
        enclosure length), "seeders" (int, parsed from the title) and
        "url" (str, the enclosure URL).
        """
        url = ""
        if self.url_type == "category":
            url = "/rss.xml?sub=" + self.url_var

        con = httplib.HTTPConnection(self.host)
        try:
            con.request("GET", url)
            resp = con.getresponse()
            body = resp.read()
        finally:
            # Close even when the request/response fails; the original
            # leaked the connection on any exception above.
            con.close()

        items = []
        feed = self.parse_xml_to_dom(body)
        if feed is not None:
            # "node" (was "feed", shadowing the DOM variable) is one RSS
            # <item> element.
            for node in feed.getElementsByTagName("item"):
                enclosure = node.getElementsByTagName("enclosure")[0]

                item = {}
                item["title"] = str(node.getElementsByTagName("title")[0].childNodes[0].nodeValue)
                item["size"] = int(enclosure.attributes["length"].nodeValue)
                item["seeders"] = int(self.get_seeders_leechers(item["title"])[0])
                item["url"] = str(enclosure.attributes["url"].nodeValue)
                items.append(item)

        return items

    def get_seeders_leechers(self, string):
        """Extract "(<seeders>S/<leechers>L)" counts from an item title.

        Returns (seeders, leechers) as ints, or (0, 0) when the pattern
        is absent or either count is zero.
        """
        matches = re.search(r"\((\d+)S/(\d+)L\)", string)
        if matches is None:
            return (0, 0)

        groups = matches.groups()
        # NOTE(review): a title like "(5S/0L)" still yields (0, 0) -- the
        # original required BOTH counts positive; behaviour preserved.
        if len(groups) == 2 and float(groups[0]) > 0 and float(groups[1]) > 0:
            return (int(groups[0]), int(groups[1]))
        return (0, 0)



