#!/usr/bin/python
# -*- coding: utf-8 -*-
# File: extractor.py
# Author: Tomás Vírseda
# License: GPL v3
# Description: Metadata Extractor For Files Plugin

import os
import re
from os.path import abspath
from os.path import dirname
from stat import ST_SIZE, ST_ATIME, ST_MTIME, ST_CTIME
import tempfile
from urlparse import urlparse
from datetime import datetime

import extractor
from rdflib import URIRef
from rdflib import Literal
from rdflib import RDF
from xdg import Mime
import lxml.html
import feedparser
import urllib2
from urlparse import urlparse
from BeautifulSoup import BeautifulSoup

from vazaarlib.utils import get_logger, safe_name
from vazaarlib.namespaces import *


# Module-wide logger and a single shared libextractor instance
# (reused by Metadata to avoid re-initialising the extractor per call).
log = get_logger("Extractor")
xtract = extractor.Extractor()

class Metadata:
    """Extract RDF metadata triples for files, strings and network URLs.

    Each ``extract_*`` method returns a list of ``(predicate, object)``
    tuples (rdflib terms) ready to be attached to a resource.
    """

    def __init__(self, provider):
        # Provider owning this extractor (kept for later use).
        self.provider = provider

    def extract(self, id, thing, thing_type):
        """Dispatch extraction on *thing_type* and stamp timestamps.

        *thing_type* is one of 'regular', 'dir', 'string' or 'network'.
        Returns the list of (predicate, Literal) tuples.  Note that
        extract_from_file() may return None on error, in which case the
        append below raises AttributeError (same as the original code).
        """
        data = []
        # Timestamp truncated to "YYYY-MM-DD HH:MM:SS" (drop microseconds).
        now = str(datetime.now())[0:19]
        if thing_type in ('regular', 'dir'):
            data = self.extract_from_file(thing)
        elif thing_type == 'string':
            data = self.extract_from_string(id, thing)
        elif thing_type == 'network':
            data = self.extract_from_network(thing)
        # The original wrapped these in try/except: raise, which is a
        # no-op -- any failure propagates to the caller either way.
        data.append((NAO['created'], Literal(now)))
        data.append((NAO['lastModified'], Literal(now)))
        return data

    def extract_from_file(self, thing):
        """Build filesystem metadata triples for a local file or folder.

        Returns the list of triples, or None when stat()/MIME detection
        fails (the error is logged).
        """
        data = []
        path = abspath(thing)
        rest, extension = os.path.splitext(path)
        ext = extension[1:].lower()
        basename = os.path.basename(rest)

        # ext is already "" when there is no extension, so a single
        # append covers both branches of the original if/else.
        data.append((NFO['fileExtension'], Literal(ext)))

        # Core metadata properties.  Access times (ST_ATIME/ST_MTIME/
        # ST_CTIME) are deliberately not stored: better to query them
        # live when the user selects the resource.
        try:
            stsize = Literal(os.stat(path)[ST_SIZE])  # size in bytes
            mime = Mime.get_type(path)
            mtype = mime.media
            stype = mime.subtype
            # Map the MIME media type onto an NFO class name.
            if mtype == "text":
                nfotype = "TextDocument"
            elif mtype == "inode":
                nfotype = "Folder"
            else:
                nfotype = mtype.capitalize()

            # NFO['belongsToContainer'] indicates that a resource belongs
            # to a specific container.  Currently a Literal; should become
            # a URIRef in a future post-processing step.
            data.append((NFO['belongsToContainer'], Literal(dirname(path))))
            data.append((RDF.type, NFO[nfotype]))
            data.append((NIE['mimeType'], Literal(mime)))
            data.append((NIE['mimeMedia'], Literal(mtype)))
            data.append((NIE['mimeSubtype'], Literal(stype)))
            # fileUrl was appended twice in the original; once is enough.
            data.append((NFO['fileUrl'], Literal(path)))
            data.append((NFO['fileScheme'], Literal('file')))
            data.append((NFO['fileName'], Literal(basename + extension)))
            data.append((NFO['fileBasename'], Literal(basename)))
            data.append((NFO['fileSize'], Literal(stsize)))
            data.append((NIE['title'], Literal(basename + extension)))

            # Guess more metadata for this file via libextractor.
            data += self.__guess_metadata(path)
            return data
        except Exception as error:
            log.error("%s: %s" % (error, path))
            return None

    def extract_from_network(self, thing, recursive=False):
        """Build metadata triples for a remote URL.

        HTTP(S) URLs are probed with feedparser to distinguish feeds
        from plain web pages; any other scheme is typed as
        NFO['RemoteDataObject'].
        """
        # RFC 1738: <scheme>://<user>:<password>@<host>:<port>/<url-path>;<params>?<query>#<fragment>
        log.debug("BEGIN")
        data = []
        parsed_url = urlparse(thing)
        url = parsed_url.geturl()

        data.append((NFO['fileUrl'], Literal(url)))
        data.append((NFO['fileScheme'], Literal(parsed_url.scheme)))
        data.append((NFO['fileHostname'], Literal(parsed_url.hostname)))
        # Optional URL components are only recorded when present.
        for prop, value in (('fileFragment', parsed_url.fragment),
                            ('fileNetloc', parsed_url.netloc),
                            ('fileParams', parsed_url.params),
                            ('filePath', parsed_url.path),
                            ('filePort', parsed_url.port),
                            ('fileQuery', parsed_url.query)):
            if value:
                data.append((NFO[prop], Literal(value)))

        #TODO: add more metadata from tags if it is webpage or feed

        # Assume HTML content for anything fetched over the network.
        data.append((NIE['mimeType'], Literal('text/html')))
        data.append((NIE['mimeMedia'], Literal('text')))
        data.append((NIE['mimeSubtype'], Literal('html')))

        if parsed_url.scheme[0:4] == 'http':
            log.debug("FEEDPARSER BEGIN")
            try:
                res = feedparser.parse(url)
                log.debug("FEEDPARSER STOP SUCCESSFULLY")
                data.append((NFO['fileStatus'], Literal(str(res.status))))
                # A non-empty version string means feedparser recognised
                # a feed format (atom10, rss20, ...).
                if len(res.version) > 0:
                    data.append((RDF.type, NFO['Feed']))
                    # TODO: should add the protocol (atom10, rss)?
                    # Reuse `res` instead of parsing the url a second time.
                    data.append((NIE['title'], Literal(res.feed.title)))
                else:
                    data.append((RDF.type, NFO['Website']))
                    log.debug("BEGIN WEB ANALYSIS")
                    data += self.__get_html_tags(url)
                    #data += self.__get_links(url)

                    try:
                        tree = lxml.html.parse(url)
                        title = tree.find(".//title").text
                    except Exception as error:
                        title = url  # fall back to the URL itself
                    data.append((NIE['title'], Literal(safe_name(title))))

                    log.debug("title: %s" % title)
                    log.debug("END WEB ANALYSIS")
            except Exception as error:
                # Sometimes feedparser fails to parse an url.
                log.debug(error)
                data.append((RDF.type, NFO['Website']))
                data.append((NIE['title'], Literal(url)))
        else:
            data.append((RDF.type, NFO['RemoteDataObject']))
            data.append((NIE['title'], Literal(url)))
        log.debug("END")
        return data

    def extract_from_string(self, id, thing):
        """Build metadata triples for clipboard text.

        The first 100 characters of *thing* become the title; *id* is a
        resource URI whose first 9 characters are stripped below.
        """
        data = []
        try:
            title = thing[0:100].strip()
        except Exception:
            title = "No title. Change it manually."
        data.append((RDF.type, NFO['Clipboard']))
        data.append((NIE['title'], Literal(safe_name(title))))
        data.append((NIE['mimeType'], Literal('text/plain')))
        data.append((NIE['mimeMedia'], Literal('text')))
        data.append((NIE['mimeSubtype'], Literal('plain')))
        # NOTE(review): the 9-char slice looks like it strips a fixed
        # scheme prefix from the id -- confirm against the caller.
        data.append((NFO['fileUrl'], Literal(str(id)[9:])))

        return data

    def __get_html_tags(self, url):
        """Download *url* and derive NAO tags from libextractor keywords.

        Returns a (possibly empty) list of (NAO['hasTag'], Literal)
        tuples; any failure is logged and yields [].
        """
        try:
            contents = urllib2.urlopen(url).read()
            # mkstemp() avoids the filename race of the original
            # tempfile.mktemp(); the temp file is also removed when done
            # (the original leaked it).
            fd, filename = tempfile.mkstemp()
            try:
                f = os.fdopen(fd, 'w')
                f.write(contents)
                f.close()
                keys = extractor.Extractor().extract(filename)
            finally:
                os.unlink(filename)

            data = []
            for keyword_type, keyword in keys:
                if keyword_type == 'keywords':
                    for label in keyword.split(','):
                        # Collapse all whitespace, then keep only the
                        # lowercase alphanumeric characters.
                        tag = str(('').join(label.split()))
                        tag = re.sub('[^a-z0-9 ]', '', tag.strip().lower(),)
                        data.append((NAO['hasTag'], Literal(tag)))

            return data
        except Exception as error:
            # Route through the module logger instead of print.
            log.error("get_html_tags: %s" % error)
            return []

    def __get_links(self, url):
        """Collect external http links found in the page at *url*.

        Only anchors whose target parses and serves a <title> are kept.
        Returns [] when the page itself cannot be fetched.
        """
        # Bug fix: the original never initialised `data`, so the
        # NameError below made this method always return [].
        data = []
        try:
            html = urllib2.urlopen(url).read()
            soup = BeautifulSoup(html)
            for anchor in soup.findAll('a'):
                try:
                    href = anchor['href']  # KeyError for href-less anchors
                    if url in href:
                        continue  # skip self-references
                    if urlparse(href).scheme != 'http':
                        continue
                    # Keep only targets that actually serve a title.
                    tree = lxml.html.parse(href)
                    tree.find(".//title").text
                    data.append((NIE['links'], Literal(href)))
                except Exception:
                    # Skip the bad anchor instead of aborting the whole
                    # scan (the original re-raised here).
                    continue
            return data
        except Exception as error:
            return []

    def __extractor2nepomuk(self, keyword_type):
        """Map a libextractor keyword type to a Nepomuk predicate.

        Returns None for unknown keyword types.
        """
        try:
            dc, dct, pl, pr = EXTRACTOR_KEYWORD[keyword_type]
            return NSBINDINGS[pl][pr]
        except Exception:
            return None

    def __guess_metadata(self, path):
        """Run libextractor on *path* and map its keywords to triples.

        Unmapped keyword types are logged and skipped.  Always returns
        a list (empty when extraction itself fails).
        """
        guess = []
        try:
            for keyword_type, keyword in xtract.extract(path):
                try:
                    predicate = self.__extractor2nepomuk(str(keyword_type))
                    if predicate:
                        guess.append((predicate, Literal(keyword)))
                    else:
                        # Logged (not printed) for later mapping work.
                        log.debug("Unmapped keyword: %s = %s"
                                  % (keyword_type, keyword))
                except Exception:
                    pass
            return guess
        except Exception as error:
            log.error(error)
            return guess














