#!coding:utf-8
# vim: tabstop=4 shiftwidth=4 softtabstop=4
"""
System-level utilities and helper functions.
"""
import shutil
import tempfile
import errno
import time
import logging
import os
import random
import datetime
import sys
import shlex
import cPickle
from rest_framework import exceptions
import rsa
from cvirt.common import exception

import types
import functools
import signal

#we want not to install eventlet just for using utils
try:
    from eventlet import event
    from eventlet import greenthread
    from eventlet import semaphore
    from eventlet.green import subprocess
    import lockfile
except:
    import subprocess

import socket
import inspect
import random

LOG = logging.getLogger("thor.utils")
CVIRT_LOCK_PATH = '/opt/tcloud/lock'
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"

def die(message, *args):
    """Write the %-formatted *message* to stderr and exit with status 1.

    :param message: printf-style format string
    :param args: values interpolated into *message*

    BUG FIX: the original used the Python 2-only ``print >> sys.stderr``
    statement; an explicit stderr write behaves identically and is also
    valid on Python 3.
    """
    sys.stderr.write((message % args) + '\n')
    sys.exit(1)

def get_host_name():
    """Return the hostname of the machine this process is running on."""
    return socket.gethostname()

def utcnow():
    """Return the current timestamp.

    NOTE(review): despite the name this returns *local* time — the real
    datetime.datetime.utcnow() call is commented out below. Confirm
    callers expect local time before renaming or changing this.
    """
    #return datetime.datetime.utcnow()
    return datetime.datetime.now()

def cnode_import_class(import_str):
    '''Import the API class only when running as a cnode worker.

    The console and cnode share one codebase; to avoid forcing the
    console to install dependencies it does not need, API import errors
    on the console side are deliberately ignored — this function simply
    returns None when not running inside a worker.
    '''

    # When started as a worker, a celery marker appears in argv:
    #   'worker'  for start_worker.sh
    #   'celeryd' for /etc/init.d/celeryd
    if 'worker' in sys.argv or 'celeryd' in sys.argv:
        try:
            return import_class(import_str)
        except exception.NotFound:
            # Fall back to importing the target as a module/instance.
            return import_object(import_str)

def import_class(import_str):
    """Resolve a dotted path 'pkg.module.ClassName' to the class object.

    :param import_str: fully-qualified class path
    :raises exception.NotFound: when the module or attribute is missing
    """
    module_name, _, cls_name = import_str.rpartition('.')
    try:
        __import__(module_name)
        module = sys.modules[module_name]
        return getattr(module, cls_name)
    except (ImportError, ValueError, AttributeError):
        raise exception.NotFound('Class %s cannot be found' % cls_name)


def import_object(import_str):
    """Import *import_str* as a module and return it; if that fails,
    treat it as a class path and return a fresh instance instead."""
    try:
        __import__(import_str)
    except ImportError:
        return import_class(import_str)()
    return sys.modules[import_str]


def abspath(s):
    """Join *s* onto the absolute directory containing this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, s)

def default_uuid():
    """Return a random UUID string in the canonical 8-4-4-4-12 form.

    Previously this hand-assembled 16 bytes from random.randint, which
    is neither cryptographically random nor RFC 4122 compliant; the
    stdlib uuid4() yields the same lowercase hyphenated format with
    proper randomness.
    """
    import uuid
    return str(uuid.uuid4())
def generate_mac():
    """Return a random MAC address with the fixed 12:03:fe prefix.

    The fourth octet is capped at 0x7f; the last two are fully random.
    """
    octets = [0x12, 0x03, 0xfe]
    octets.append(random.randint(0x00, 0x7f))
    octets.append(random.randint(0x00, 0xff))
    octets.append(random.randint(0x00, 0xff))
    return ':'.join('%02x' % octet for octet in octets)


def last_octet(address):
    """Return the final dotted component of *address* as an integer."""
    _, _, tail = address.rpartition('.')
    return int(tail) if tail else int(address)


def isotime(at=None):
    """Render *at* using TIME_FORMAT; defaults to the current UTC time."""
    when = at if at else datetime.datetime.utcnow()
    return when.strftime(TIME_FORMAT)


def parse_isotime(timestr):
    """Inverse of isotime(): parse a TIME_FORMAT string to a datetime."""
    return datetime.datetime.strptime(timestr, TIME_FORMAT)

def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

def execute(*cmd, **kwargs):
    """Helper method to execute a command with optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :param process_input:   Sent to the opened process's stdin.
    :param check_exit_code: Defaults to 0. Raise
                            exception.ProcessExecutionError unless the
                            program exits with this code.
    :param delay_on_retry:  True | False. Defaults to True. If True,
                            wait a short random time before retrying.
    :param attempts:        How many times to try cmd.
    :param run_as_root:     True | False. Defaults to False. If True,
                            the command is prefixed with 'sudo'.
    :param is_debug:        True | False. Defaults to True; log the
                            command before running it.

    :returns: (stdout, stderr) from Popen.communicate()
    :raises exception.Error: on receiving unknown keyword arguments
    :raises exception.ProcessExecutionError: on exit-code mismatch
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', 0)
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    is_debug = kwargs.pop('is_debug', True)
    if kwargs:
        raise exception.Error(('Got unknown keyword args '
                               'to utils.execute: %r') % kwargs)

    if run_as_root:
        cmd = ['sudo'] + list(cmd)
    # A real list (not a map iterator) so the command survives retries
    # and repeated joins on any Python version.
    cmd = [str(c) for c in cmd]
    while attempts > 0:
        attempts -= 1
        try:
            if is_debug:
                # BUG FIX: the original line was
                #   LOG.debug('...%s' % cmd), ' '.join(cmd)
                # which logged the raw list repr and built a throwaway
                # tuple; log the joined command string instead.
                LOG.debug('Running cmd (subprocess): %s' % ' '.join(cmd))
            _PIPE = subprocess.PIPE
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True,
                                   preexec_fn=_subprocess_setup)
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()
            _returncode = obj.returncode
            if _returncode:
                LOG.debug('Result was %s' % _returncode)
                # isinstance() replaces the comparison against
                # types.IntType (removed in Python 3); behaviour here is
                # identical for integer check_exit_code values.
                if isinstance(check_exit_code, int) \
                        and _returncode != check_exit_code:
                    (stdout, stderr) = result
                    raise exception.ProcessExecutionError(
                        exit_code=_returncode,
                        stdout=stdout,
                        stderr=stderr,
                        cmd=' '.join(cmd))
            return result
        except Exception as e:
            LOG.debug('exception was %s ' % e)
            if not attempts:
                raise
            else:
                LOG.debug('%r failed. Retrying.', cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # This yield appears to be necessary to let the subprocess
            # machinery clean up between calls; without it two execute
            # calls in a row can hang the second one.
            # NOTE(review): greenthread is only bound when eventlet
            # imported successfully at module load — confirm eventlet is
            # a hard requirement on every host that calls execute().
            greenthread.sleep(0)


hwuuid = None  # module-level cache for hardware_uuid()


def hardware_uuid():
    """Return the host's hardware UUID as reported by HAL.

    The value is queried once via hal-get-property and cached in the
    module-level 'hwuuid' for all later calls.
    """
    global hwuuid
    if not hwuuid:
        stdout, _stderr = execute(
            'hal-get-property',
            '--udi', '/org/freedesktop/Hal/devices/computer',
            '--key', 'system.hardware.uuid')
        hwuuid = stdout.rstrip()
    return hwuuid


def sendmail(host, username, password, subject, tofrom, to, context):
    """Send a high-priority plain-text mail through an SMTP relay.

    :param host: SMTP server to connect to
    :param username: relay login user
    :param password: relay login password
    :param subject: message subject
    :param tofrom: envelope/From address
    :param to: recipient address
    :param context: message body (sent as utf-8 text)
    :raises smtplib.SMTPDataError: when sender or recipient is missing
    """
    import smtplib
    from email.mime.text import MIMEText
    if not tofrom or not to:
        # BUG FIX: SMTPDataError requires (code, msg) arguments; the
        # original no-argument call raised TypeError instead of the
        # intended SMTP error.
        raise smtplib.SMTPDataError(554, 'sender and recipient required')

    msg = MIMEText(context, _charset='utf-8')
    msg['Subject'] = subject
    msg['From'] = tofrom
    msg['To'] = to
    msg['X-Priority'] = '1'
    msg['X-MSMail-Priority'] = 'High'

    s = smtplib.SMTP(host)
    #s.set_debuglevel(1)
    try:
        s.login(username, password)
        s.sendmail(tofrom, to, msg.as_string())
    finally:
        # Always release the connection, even when login/send fails.
        try:
            s.quit()
        except smtplib.SMTPException:
            s.close()



# Registry of per-name green-thread semaphores used by @synchronized.
_semaphores = {}




class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError, e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception("Could not release the acquired lock `%s`",
                          self.fname)

    def trylock(self):
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()




class _PosixLock(_InterProcessLock):
    # fcntl-based inter-process lock for POSIX systems. Relies on the
    # module-level 'import fcntl' executed after this class definition;
    # the name resolves at call time, so that ordering is safe.
    def trylock(self):
        # Non-blocking exclusive lock; raises IOError (EACCES/EAGAIN)
        # while another process holds it, which __enter__ retries on.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)



# POSIX-only module; deliberately imported here next to its sole user.
import fcntl
# Public alias: the flock-based lock is the only implementation provided.
InterProcessLock = _PosixLock

# NOTE(review): re-initialises the registry already created earlier in
# this module — harmless (nothing has populated it yet) but redundant.
_semaphores = {}


def synchronized(name, lock_file_prefix='cvirt-', external=True, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix. The prefix should end with a hyphen ('-') if specified.

    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.

    The lock_path keyword argument is used to specify a special location for
    external lock files to live. If nothing is set, then CVIRT_LOCK_PATH is
    used as a default.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            # NOTE(review): a fresh semaphore.Semaphore() is constructed on
            # every call even when the name is already registered; wasteful
            # but harmless, since only the registered instance is ever used.
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...', {'lock': name,
                                                 'method': f.__name__})



                if external:
                    LOG.debug('Attempting to grab file lock "%(lock)s" '
                                'for method "%(method)s"...',
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False

                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = CVIRT_LOCK_PATH

                    # NOTE(review): dead branch — CVIRT_LOCK_PATH is a
                    # non-empty constant, so local_lock_path is always
                    # truthy here and the tempdir fallback never runs.
                    if not local_lock_path:
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()

                    if not os.path.exists(local_lock_path):
                        os.makedirs(local_lock_path)

                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                    lock_file_path = os.path.join(local_lock_path,
                                                  lock_file_name)

                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug('Got file lock "%(lock)s" at '
                                        '%(path)s for method '
                                        '"%(method)s"...',
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        # NOTE(review): this logs "Released" even when the
                        # try block raised before the lock was acquired.
                        LOG.debug('Released file lock "%(lock)s" at '
                                    '%(path)s for method "%(method)s"...',
                                  {'lock': name,
                                   'path': lock_file_path,
                                   'method': f.__name__})
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to
                        #             cleanup the locks left behind by unit
                        #             tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    retval = f(*args, **kwargs)
            return retval
        return inner
    return wrap




class LoopingCallDone(Exception):
    """Raised by a LoopingCall's poll function to stop the loop cleanly.

    Somewhat analogous to StopIteration: the function passed to
    LoopingCall raises this to break out of the loop normally.

    The optional *retvalue* argument is what LoopingCall.wait() will
    return to the caller.
    """

    def __init__(self, retvalue=True):
        """:param retvalue: value that LoopingCall.wait() should return."""
        self.retvalue = retvalue


class LoopingCall(object):
    """Periodically invoke a function on an eventlet green thread.

    The wrapped function runs every *interval* seconds until stop() is
    called, the function raises LoopingCallDone, or the optional
    *waiting* time budget is exceeded.
    """

    def __init__(self, f=None, *args, **kw):
        """:param f: callable to invoke; *args/**kw are passed through."""
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False

    def start(self, interval, now=True, waiting=-1):
        """Begin looping f every *interval* seconds.

        :param interval: seconds to sleep between invocations
        :param now: when False, sleep one interval before the first call
        :param waiting: total seconds allowed before aborting with an
                        exception; -1 (the default) means no limit
        :returns: an eventlet Event; wait() on it for the final result
        """
        self._running = True
        done = event.Event()

        def _inner():
            time_count = 0
            if not now:
                greenthread.sleep(interval)
                time_count += interval
            try:
                while self._running:
                    self.f(*self.args, **self.kw)
                    if not self._running:
                        break
                    greenthread.sleep(interval)
                    time_count += interval

                    if waiting != -1 and time_count > waiting:
                        raise Exception("After %s seconds' waiting, break out "
                                        "looping call." % waiting)
            # CONSISTENCY FIX: 'except X as e' replaces the Python 2-only
            # 'except X, e' spelling, matching the rest of this module.
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                logging.exception('in looping call')
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done

    def is_running(self):
        """True while the loop has been started and not stopped."""
        return self._running

    def stop(self):
        """Ask the loop to exit after the current iteration."""
        self._running = False

    def wait(self):
        """Block until the loop finishes; return its final value."""
        return self.done.wait()


def in_period(start, end):
    """True when the current local time falls within [start, end]."""
    now = datetime.datetime.now()
    return start <= now and now <= end

def utcnow():
    """Return the current *local* time.

    NOTE(review): this redefines the utcnow() declared near the top of
    this module (same behaviour); the later definition wins at import
    time. Despite the name it returns datetime.now(), not UTC — confirm
    callers expect local time before changing.
    """
    return datetime.datetime.now()


def pick_dev(exclude, prefix='vd'):
    """Pick an unused disk device name.

    :param exclude: device names already in use; the chosen name is
                    appended to this list in place.
    :param prefix: device-name prefix, default 'vd'.
    :returns: (exclude, dev) where dev is the chosen name, or None when
              every candidate is already taken.
    """
    postfix = ['a', 'b', 'c', 'd']  # IDE supports at most 4 disk devices

    # List comprehension instead of filter(map(...)): identical on
    # Python 2 and still a real list on Python 3, where filter() returns
    # a lazy iterator that would break the truthiness/indexing below.
    include = ['%s%s' % (prefix, v) for v in postfix
               if '%s%s' % (prefix, v) not in exclude]
    dev = None
    if include:
        # random.choice is equivalent to indexing with
        # randint(0, len-1) but clearer.
        dev = random.choice(include)
        exclude.append(dev)
    return exclude, dev


def get_license_info():
    """Load, RSA-decrypt and unpickle this node's license information.

    Reads the hex-encoded blob from /etc/licenseinfo, decrypts it with
    the bundled private key (manager/tcloudpriv) and unpickles the
    result.

    :returns: the unpickled license object
    :raises exceptions.AuthenticationFailed: on any failure
        ('NoLicenseExist')
    """
    try:
        # NOTE(review): file() is Python 2 only; open() is the portable
        # spelling — confirm before changing.
        with file("/etc/licenseinfo") as key:
            msg = key.read()
            # Hex string -> raw ciphertext bytes.
            dmsg = rsa.transform.binascii.a2b_hex(msg)
            with file(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'manager','tcloudpriv')) as prikeyfile:
                dmsg = rsa.decrypt(dmsg, rsa.PrivateKey.load_pkcs1(prikeyfile.read()))
            # Redundant: the 'with' block above already closed this file.
            prikeyfile.close()
            # SECURITY NOTE: cPickle.loads on decrypted data — only safe
            # while the RSA key pair remains trusted and private.
            licenseinfo = cPickle.loads(dmsg)
        # Redundant: the enclosing 'with' closes the file on exit.
        key.close()
        return licenseinfo
    except Exception as e:
        logging.exception("error stack:%s" % e)
        raise exceptions.AuthenticationFailed('NoLicenseExist')


class safedict(object):
    """Dict wrapper whose item access never raises KeyError.

    Missing keys yield None from __getitem__ instead of raising. The
    Python 2-style has_key() method is kept for existing callers.
    """

    def __init__(self, obj):
        """:param obj: the dict to wrap; anything else fails the assert."""
        self.obj = obj
        # isinstance instead of an exact type() comparison: also accepts
        # dict subclasses, still AssertionError for non-dicts.
        assert isinstance(self.obj, dict)

    def __getitem__(self, item):
        # dict.get() replaces the Py2-only has_key() double lookup and
        # still returns None for missing keys, exactly as before.
        return self.obj.get(item)

    def has_key(self, item):
        # Name kept for backward compatibility with existing callers;
        # 'in' works on both Python 2 and 3.
        return item in self.obj


# Manual smoke test: running this module directly exercises the
# license-loading path (raises AuthenticationFailed when no license).
if __name__ == "__main__":
    get_license_info()