#
# Copyright 2009 Red Hat, Inc. and/or its affiliates.
#
# Licensed to you under the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.  See the files README and
# LICENSE_GPL_v2 which accompany this distribution.
#

import os.path
import glob
import threading
import logging
import re
import signal
import errno

from config import config
import constants
import misc
import sd
import sp
import vg
import pv
import safelease
import blockVolume
import multipath
from sd import SD_METADATA_SIZE, MAX_DOMAIN_DESCRIPTION_SIZE
import storage_exception as se
import iscsi

from remoteFileHandler import remoteFileHandlerPool as ros

# Subdirectory of the domain mountpoint that holds block storage domains
BLOCKSD_DIR = "blockSD"

# Name and size of the special LV that carries the master file system
MASTERLV = "master"
MASTERLV_SIZE = "1G"

# Extra metadata key kept by block domains (the backing VG's UUID)
VGUUID = "VGUUID"

# Domain metadata sizing constants; 378 and 142 are presumably bytes per
# base record / per-PV mapping entry - TODO confirm against sd metadata layout
METADATA_BASE_SIZE = 378
MAX_PVS = config.getint('irs', 'maximum_allowed_pvs')
PVS_METADATA_SIZE = MAX_PVS * 142

log = logging.getLogger("irs")

class BlockSDMetadata(sd.SDMetadata):
    """
    Block Storage Domain metadata implementation.

    The domain metadata is stored at the beginning of a dedicated metadata
    logical volume rather than in a plain file.
    """
    def __init__(self, metavol):
        """
        :param metavol: path to the metadata LV (".../<vgName>/<lvName>")
        """
        sd.SDMetadata.__init__(self, SD_METADATA_SIZE)
        # BlockSDMetadata is kept on the metadata volume
        self.metavol = metavol
        # BlockSD keeps an additional parameter (the VG UUID) in its metadata
        self.metakeys.append(VGUUID)
        # Derive VG and LV names from the device path so the LV can be
        # activated on demand before any read/write.
        self._vgName, self._lv = metavol.split("/")[-2:]
        self._vg = None

    def _validate(self, md):
        """
        Validate the PV keys. All other validation are done by the parent validate.
        """
        for i in md:
            if i.find("=") < 0:
                continue
            pair = i.split("=")
            # Everything should be in form PARAM=VALUE
            if len(pair) != 2:
                continue
            key = pair[0].strip()
            # Validate and Insert PV keys into metakeys
            if key not in self.metakeys and key.startswith("PV"):
                try:
                    if int(key[2:]) >= 0:
                        self.metakeys.append(key) # make general validate pass
                except ValueError:
                    # "PV" is not followed by a number - not a mapping key.
                    # (Only catch ValueError from int(); the previous bare
                    # except also swallowed KeyboardInterrupt/SystemExit.)
                    msg = "KEY %s is not registered" % (key)
                    self.log.warning(str(se.MetaDataValidationError(msg)))

        return sd.SDMetadata._validate(self, md)

    def _get(self):
        """
        Fetch the metadata from the metadata volume, activating the LV first
        if its device node does not exist yet.
        """
        if not os.path.exists(self.metavol):
            if self._vg is None:
                self._vg = vg.VG(self._vgName)
            self._vg.activateLV(self._lv)
        m = misc.readblockSUDO(self.metavol, 0, SD_METADATA_SIZE)
        # Read from metadata volume will bring a load of zeroes trailing
        # actual metadata. Strip it out.
        metadata = [i for i in m if len(i) > 0 and i[0] != '\x00' and "=" in i]

        self.log.debug("(BlockSDMetadata._get) metadata=%s",
            str(metadata)[:SD_METADATA_SIZE])

        return metadata

    def _set(self, metadata):
        """
        Write `metadata' to the metadata volume, activating the LV first if
        needed. A single retry is attempted on an incomplete block write.
        """
        self.log.debug("(BlockSDMetadata._set) metadata=%s", str(metadata)[:SD_METADATA_SIZE])
        metadata = [i + '\n' for i in metadata]
        # Clear out previous data - it is a volume, not a file
        if not os.path.exists(self.metavol):
            if self._vg is None:
                self._vg = vg.VG(self._vgName)
            self._vg.activateLV(self._lv)
        try:
            misc.writeblockSUDO(self.metavol, 0, SD_METADATA_SIZE, metadata, sudo=False)
        except se.MiscBlockWriteIncomplete:
            self.log.debug("(BlockSDMetadata._set) write failed, retrying")
            misc.writeblockSUDO(self.metavol, 0, SD_METADATA_SIZE, metadata, sudo=False)

class BlockStorageDomain(sd.StorageDomain):
    """
    Storage Domain implemented on top of an LVM Volume Group.

    Volumes are logical volumes; the domain keeps its own metadata and
    service data on special LVs (metadata, leases, ids, inbox, outbox and
    the "master" LV carrying the masterFS).
    """
    # All block domains are presented under <repo>/<mnt>/blockSD/<sdUUID>
    mountpoint = os.path.join(sd.StorageDomain.storage_repository,
            sd.DOMAIN_MNT_POINT, BLOCKSD_DIR)

    def __init__(self, sdUUID, vgobj):
        """
        :param sdUUID: storage domain UUID
        :param vgobj: vg.VG object backing this domain
        """
        sd.StorageDomain.__init__(self, sdUUID)
        self.domaindir = os.path.join(self.mountpoint, self.sdUUID)
        self.vg = vgobj
        self.refreshSpecialVolumes()
        self.metavol = self.vg.lvPath(sd.METADATA)
        self._metadata = BlockSDMetadata(self.metavol)
        # _extendlock is used to prevent race between
        # VG extend and LV extend.
        self._extendlock = threading.Lock()
        self.imageGarbageCollector()

    @classmethod
    def metaSize(cls, vgroup):
        ''' Calc the minimal meta volume size in MB'''
        # In any case the metadata volume cannot be less than 512MB for the
        # case of 512 bytes per volume metadata, 2K for domain metadata and
        # extent size of 128MB. In any case we compute the right size on line.
        minmetasize = (sd.SD_METADATA_SIZE / sd.METASIZE * vgroup.vgextsize +
            (1024 * 1024 - 1)) / (1024 * 1024)
        metaratio = vgroup.vgmetaratio
        # One METASIZE slot per VG extent, rounded up to whole MBs
        metasize = (vgroup.vgextcount * sd.METASIZE + (1024*1024-1)) / (1024*1024)
        metasize = max(minmetasize, metasize)
        if metasize > vgroup.vgfree / (1024*1024):
            raise se.VolumeGroupSizeError("volume group has not enough extents %s (Minimum %s), VG may be too small" % (vgroup.vgextcount, (1024*1024)/sd.METASIZE))
        cls.log.info("(blockSD metadata) size %s MB (metaratio %s)" % (metasize, metaratio))
        return metasize

    @classmethod
    def create(cls, sdUUID, domainName, domClass, vgUUID, storageType):
        """ Create new storage domain
            'sdUUID' - Storage Domain UUID
            'domainName' - storage domain name
            'vgUUID' - volume group UUID
            'domClass' - Data/Iso
            'storageType' - storage type (passed through sd.type2name)
        """
        cls.log.info("(blockSD.create) sdUUID=%s domainName=%s vgUUID=%s "
            "domClass=%s", sdUUID, domainName, vgUUID, domClass)

        if len(domainName) > sd.MAX_DOMAIN_DESCRIPTION_SIZE:
            raise se.StorageDomainDescriptionTooLongError()

        # Locate the target VG by its UUID
        vgroup = None
        for i in vg.getVGList():
            if i.uuid == vgUUID:
                vgroup = i

        if not vgroup:
            raise se.VolumeGroupDoesNotExist(vgUUID)
        if vgroup.hasSDTag():
            raise se.VolumeGroupHasDomainTag(vgUUID)
        if vgroup.listLVS():
            raise se.StorageDomainNotEmpty(vgUUID)
        if len(vgroup.listPVS()) > MAX_PVS:
            cls.log.debug("%d > %d" , len(vgroup.listPVS()), MAX_PVS)
            raise se.StorageDomainIsMadeFromTooManyPVs()

        # Set the name of the VG to be the same as sdUUID
        if vgroup.name != sdUUID:
            vgroup.rename(sdUUID)
        # Create metadata service volume
        metasize = cls.metaSize(vgroup)
        vgroup.createLV(sd.METADATA, "%sM" % (metasize), cont=False)
        (dev, ext) = vgroup.getFirstExt(sd.METADATA)
        if ext != 0:
            cls.log.error("INTERNAL: metadata ext is not NULL")
            raise se.MetaDataMappingError("vg %s: metadata extent is not the first extent" % vgUUID)
        # Create the mapping right now so the index 0 is guaranteed
        # to belong to the metadata volume. Since the metadata is at
        # least SDMETADATA/METASIZE units, we know we can use the first
        # SDMETADATA bytes of the metadata volume for the SD metadata.
        # pass metadata's dev to ensure it is the first mapping
        mapping = cls.createMetadataMapping(vgroup, dev)

        # Create the rest of the BlockSD internal volumes
        vgroup.createLV(sd.LEASES, sd.LEASES_SIZE)
        vgroup.createLV(sd.IDS, sd.IDS_SIZE)
        vgroup.createLV(sd.INBOX, sd.INBOX_SIZE)
        vgroup.createLV(sd.OUTBOX, sd.OUTBOX_SIZE)
        vgroup.createLV(MASTERLV, MASTERLV_SIZE)

        # Create VMS file system
        _createVMSfs(os.path.join("/dev", vgroup.name, MASTERLV))

        vgroup.deactivateLV(MASTERLV)

        # Refresh/reset volume permissions
        _refreshVGPerms(vgroup)

        metavol = vgroup.lvPath(sd.METADATA)

        # Zero out the metadata and special volumes before use
        try:
            path = metavol
            misc.ddCopy("/dev/zero", path, metasize*(1024*1024))
            path = vgroup.lvPath(sd.INBOX)
            misc.ddCopy("/dev/zero", path, sd.sizeStr2Int(sd.INBOX_SIZE))
            path = vgroup.lvPath(sd.OUTBOX)
            misc.ddCopy("/dev/zero", path, sd.sizeStr2Int(sd.OUTBOX_SIZE))
        except se.StorageException:
            # `path' names whichever volume was being zeroed when we failed
            raise se.VolumesZeroingError(path)

        md = BlockSDMetadata(metavol)

        # create domain metadata
        tmp = [
            "%s=%s" % (sd.VERSION, sd.SDMETADATA_VERSION),
            "%s=%s" % (sd.SDUUID, sdUUID),
            "%s=%s" % (sd.TYPE, sd.type2name(storageType)),
            "%s=%s" % (sd.CLASS, sd.class2name(domClass)),
            "%s=%s" % (sd.DESCRIPTION, domainName),
            "%s=%s" % (sd.ROLE, sd.REGULAR_DOMAIN),
            "%s=%s" % (sd.POOL, ''),
            "%s=%s" % (sd.MASTER_VER, 0),
            "%s=%s" % (sd.SPM_ID, -1),
            "%s=%s" % (sd.LVER,-1),
            "%s=%s" % (sd.LOCK_POLICY, ''),
            "%s=%s" % (sd.LOCK_RENEWAL_INTERVAL_SEC, ''),
            "%s=%s" % (sd.LEASE_TIME_SEC, ''),
            "%s=%s" % (sd.IO_OP_TIMEOUT_SEC, ''),
            "%s=%s" % (sd.LEASE_RETRIES, ''),
            "%s=%s" % (VGUUID, vgUUID)
        ]

        # Serialize the PV mapping as PV<n>=pv:...,uuid:...,... entries
        i = 0
        for m in mapping:
            # i is a number
            tmp.append("PV%s=" % i +
                "pv:%s," % m["guid"] +
                "uuid:%s," % m["uuid"] +
                "pestart:%s," % m["pestart"] +
                "pecount:%s," % m["pecount"] +
                "mapoffset:%s" % m["mapoffset"])
            i += 1

        md.metadata = tmp
        # To be on a safe side
        md.clearcache()
        # Why do we need it here ?
        #self.disconnect()

        # Mark VG with Storage Domain Tag
        vgroup.setSDTag()

        bsd = BlockStorageDomain(sdUUID, vgroup)

        bsd.initSPMlease()

        return bsd


    @classmethod
    def createMetadataMapping(cls, vgroup, first):
        '''
        Create meta data mapping, and make 'first' be the first mapping.
        '''
        pvlist = vgroup.listPVS()

        first = os.path.basename(first)
        if len(pvlist) < 1:
            raise se.MetaDataMappingError("VG %s: PV list is empty" % vgroup.name)

        mapoffset = 0
        firstpv = None
        for pv in pvlist:
            if pv["guid"] == first:
                firstpv = pv

        if not firstpv:
            raise se.MetaDataMappingError("VG %s: target PV %s does not belong to VG [pvlist %s]" %
                (vgroup.name, first, str(pvlist)))
        # Move the metadata PV to the head of the list so it owns offset 0
        pvlist.remove(firstpv)
        pvlist.insert(0, firstpv)
        cls.log.info("Create: SORT MAPPING: %s" % pvlist)

        # Create mappings
        mapping = []
        for pv in pvlist:
            map = dict(guid=pv["guid"], uuid=pv["uuid"], pestart=0,
                pecount=int(pv["pecount"]), mapoffset=mapoffset)
            mapoffset += int(pv["pecount"])
            mapping.append(map)
        cls.log.info("Create META MAPPING: %s" % mapping)
        return mapping


    def produceVolume(self, imgUUID, volUUID=None):
        """
        Produce a type specific volume object
        """
        return blockVolume.BlockVolume(self.sdUUID, self.getMetaParam(sd.POOL), imgUUID, volUUID)


    def getVolumeClass(self):
        """
        Return a type specific volume generator object
        """
        return blockVolume.BlockVolume


    def createVolume(self, isStopping, imgUUID, size, volFormat, preallocate, diskType, volUUID, desc, srcImgUUID, srcVolUUID):
        """
        Create a new volume
        """
        return blockVolume.BlockVolume.create(isStopping, self.sdUUID, self.getMetaParam(sd.POOL), imgUUID, size, volFormat, preallocate, diskType, volUUID, desc, srcImgUUID, srcVolUUID)


    def updateMapping(self):
        """
        Append PVs that joined the VG since the mapping was written to the
        PV mapping kept in the domain metadata.

        Returns the extent offset just past the last mapped PV.
        """
        # First read existing mapping from metadata
        mapping = self.readMetadataMapping()
        existing_devs = [i["guid"] for i in mapping]
        self.log.info("sd %s: update META MAPPING: existing devices %s" % (self.sdUUID, existing_devs))
        mapoffset = int(mapping[-1]["mapoffset"]) + int(mapping[-1]["pecount"])
        i = len(mapping)

        # Now add new PVs to the mapping
        pvlist = self.vg.listPVS()
        for pv in pvlist:
            self.log.debug("sd %s: update mapping - checking device %s" % (self.sdUUID, pv["guid"]))
            if pv["guid"] not in existing_devs:
                map = dict(guid=pv["guid"], uuid=pv["uuid"], pestart=0,
                    pecount=int(pv["pecount"]), mapoffset=mapoffset)
                mapoffset += int(pv["pecount"])
                mapping.append(map)
                self.log.info("sd %s: update mapping - new device found: %s" % (self.sdUUID, pv["guid"]))

        # Serialize only the newly added entries (from index i on)
        metadata = self.Metadata()
        for m in mapping[i:]:
            # i is a number
            metadata.append("PV%s=" % i +
                "pv:%s," % m["guid"] +
                "uuid:%s," % m["uuid"] +
                "pestart:%s," % m["pestart"] +
                "pecount:%s," % m["pecount"] +
                "mapoffset:%s" % m["mapoffset"])
            i += 1

        self.Metadata(metadata)
        self.log.debug("sd %s: update mapping to %s" % (self.sdUUID, metadata))

        # To be on a safe side
        self._metadata.clearcache()

        return mapoffset


    def extend(self, devlist):
        """
        Extend the domain with the multipath devices in `devlist', updating
        the PV mapping and growing the metadata LV accordingly.
        """
        mapping = self.readMetadataMapping()
        if len(mapping) + len(devlist) > MAX_PVS:
            raise se.StorageDomainIsMadeFromTooManyPVs()

        # Serialized with extendVolume() via _extendlock
        self._extendlock.acquire()
        lvmFilter = [p['guid'] for p in mapping] + devlist
        try:

            knowndevs = multipath.getMPDevNames()
            devices = []

            for dev in devlist:
                if dev in knowndevs:
                    devices.append(pv.PV(dev, lvmFilter=lvmFilter))
                else:
                    raise se.InvalidPhysDev(dev)

            for dev in devices:
                self.vg.addDevice(dev)
            self.updateMapping()
            newsize = self.metaSize(self.vg)
            self.vg.extendLV("metadata", newsize)

        finally:
            self._extendlock.release()

    def disconnect(self):
        '''
        Make sure all LVs are inactive.
        '''
        sd.StorageDomain.disconnect(self)
        if self.vg:
            self.vg.deactivate()

    def mapMetaOffset(self, vol_name):
        """
        Translate the first extent of LV `vol_name' into its offset inside
        the domain-wide metadata mapping.
        """
        (dev, ext) = self.vg.getFirstExt(vol_name)
        self.log.debug("vol %s dev %s ext %s" % (vol_name, dev, ext))
        for pv in self.readMetadataMapping():
            self.log.debug("MAPOFFSET: pv %s -- dev %s ext %s" % (pv, dev, ext))
            pestart = int(pv["pestart"])
            pecount = int(pv["pecount"])
            if (os.path.basename(dev) == pv["guid"] and
                int(ext) in range(pestart, pestart + pecount)):

                offs =  int(ext) + int(pv["mapoffset"])
                if offs < sd.SD_METADATA_SIZE/sd.METASIZE:
                    # The first slots are reserved for the SD metadata itself
                    raise se.MetaDataMappingError("domain %s: vol %s MD offset %s is bad - will overwrite SD's MD" % (self.sdUUID, vol_name, offs))
                return offs
        raise se.MetaDataMappingError("domain %s: can't map PV %s ext %s" % (self.sdUUID, dev, ext))

    def readMetadataMapping(self):
        """
        Parse the PV<n>=pv:...,uuid:...,pestart:...,pecount:...,mapoffset:...
        entries of the domain metadata into a list of dicts.
        """
        meta = self.Metadata()
        mapping = []
        for l in meta:
            if not l.startswith("PV") or l.find("=") < 0:
                continue
            pv = l.split("=")[1].split(",")
            map = {}
            for v in pv:
                if v.find(":") < 0:
                    continue
                v = v.split(":")
                if v[0] == "pv":
                    map["guid"] = v[1]
                if v[0] == "uuid":
                    map["uuid"] = v[1]
                if v[0] == "pestart":
                    map["pestart"] = int(v[1])
                if v[0] == "pecount":
                    map["pecount"] = int(v[1])
                if v[0] == "mapoffset":
                    map["mapoffset"] = int(v[1])
            mapping.append(map)
        self.log.info("META MAPPING: %s" % mapping)
        return mapping


    def initSPMlease(self):
        """
        Initialize the SPM lease
        """
        # Failure is logged but not raised - domain creation proceeds
        (rc, out, err) = safelease.initLock(self.vg.lvPath(sd.LEASES))
        if rc != 0:
            self.log.error("blockSD.create could not initialise spm lease (%s): %s" % (rc, out))
        else:
            self.log.debug("blockSD.create - lease initialized successfully")


    def refreshPerms(self):
        """
        Refresh the permissions on the special Block Storage Domain volumes.
        """
        _refreshVGPerms(self.vg)

    def selftest(self):
        """
        Run the underlying VG validation routine
        """
        return self.validate()

    def validate(self):
        """
        Validate that the storage domain is accessible.
        """
        self.log.info("(BlockStorageDomain.validate) sdUUID=%s", self.sdUUID)
        self.refreshDirTree()
        self.refreshPerms()
        self.Metadata(cache=True)
        # Touch the "ids" dm device to prove the storage is readable.
        # NOTE(review): the fd is not closed if seek/read raises - consider
        # wrapping in try/finally.
        f = open(os.path.join("/", "dev", "mapper", self.sdUUID.replace("-", "--") + "-ids"), "r")
        f.seek(100) #TODO: make random
        f.read(1)
        f.close()
        return True


    def invalidate(self):
        """
        Make sure that storage domain is inaccessible.
        1. Make sure master LV is not mounted
        2. Deactivate all the volumes from the underlying VG
        3. Destroy any possible dangling maps left in device mapper
        4. Remove any possible dangling links in /dev
        """
        try:
            self.unmountMaster()
        except se.StorageDomainMasterUnmountError:
            self.log.warning("Unable to unmount master LV during invalidateSD")
        except se.CannotDeactivateLogicalVolume:
            # It could be that at this point there is no LV, so just ignore it
            pass
        except Exception, e:
            # log any other exception, but keep going
            misc.logException(e)

        # Be really paranoid, since we have to get to following dmRemoveMapping
        # at any cost
        try:
            self.vg.deactivate()
        except Exception, e:
            # log any other exception, but keep going
            misc.logException(e)

        try:
            misc.dmRemoveMapping(self.vg.name)
        except Exception, e:
            # log any other exception, but keep going
            misc.logException(e)

        misc.cleanupdir(os.path.join("/dev", self.vg.name))

    def validateMaster(self):
        """Validate that the master storage domain is correct.

        Returns a dict with boolean 'mount' and 'valid' flags.
        """
        stat = {'mount' : True, 'valid' : True}
        if not self.isMaster():
            return stat

        masterdir = os.path.join(self.domaindir, sd.MASTER_DIR_NAME)
        # If the host is SPM then at this point masterFS should be mounted
        # In HSM case we can return False and then upper logic should handle it
        if not os.path.ismount(masterdir):
            stat['mount'] = False
            return stat

        pdir = os.path.join(masterdir, sd.VMS_DIR_NAME)
        if not misc.fileexists(pdir):
            stat['valid'] = False
            return stat
        pdir = os.path.join(masterdir, sd.TASKS_DIR_NAME)
        if not misc.fileexists(pdir):
            stat['valid'] = False
            return stat

        return stat

    def format(self):
        """Format detached storage domain.
           This removes all data from the storage domain.
        """
        # Remove the directory tree
        misc.cleanupdir(self.domaindir, ignoreErrors = False)
        # Remove special metadata and service volumes
        # Remove all volumes LV if exists
        _removeVMSfs(os.path.join("/dev", self.vg.name, MASTERLV))
        lvList = self.vg.listLVS()
        for lv in lvList:
            self.vg.removeLV(lv)

        # Remove SD tag
        self.vg.remSDTag()
        return True

    def getInfo(self):
        """
        Get storage domain info
        """
        ##self.log.info("(BlockStorageDomain.getInfo) sdUUID=%s", self.sdUUID)
        # First call parent getInfo() - it fills in all the common details
        info = sd.StorageDomain.getInfo(self)
        # Now add blockSD specific data
        info['vguuid'] = self.vg.uuid
        info['state'] = self.vg.getState()
        return info

    def getStats(self):
        """
        Get domain statistics: total/free space (as strings) and the VG
        metadata validity status.
        """
        vgMetadataStatus = self.vg.metadataValidity()
        return dict(disktotal=str(self.vg.vgsize),
                    diskfree=str(self.vg.vgfree),
                    mdavalid=vgMetadataStatus['mdavalid'],
                    mdathreshold=vgMetadataStatus['mdathreshold'])

    def getAllImages(self):
        """
        Get list of all images
        """
        vols = self.vg.listTags()
        img = {}
        for v in vols:
            for t in v:
                # Image UUIDs are encoded as LV tags with an "IU_" prefix;
                # a dict is used to deduplicate them
                if t.startswith("IU_"):
                    img[t[3:]] = None
        return img.keys()

    def getIsoList(self, extension):
        """Get list of all ISO/Floppy images
            'extension' - 'iso'/'floppy' for ISO/Floppy images

        Block storage domains carry no ISO/Floppy images - always empty.
        """
        isolist = []
        return isolist

    def mountMaster(self):
        """
        Mount the master metadata file system. Should be called only by SPM.
        """
        self.vg.setrwLV(MASTERLV, rw=True)
        self.vg.activateLV(MASTERLV)
        masterdir = os.path.join(self.domaindir, sd.MASTER_DIR_NAME)
        masterfsdev = os.path.join("/dev", self.vg.name, MASTERLV)
        if not os.path.exists(masterdir):
            os.makedirs(masterdir)
        cmd = [constants.EXT_FSCK, "-p", masterfsdev]
        (rc, out, err) = misc.execCmd(cmd)
        # fsck exit codes
        # 0    - No errors
        # 1    - File system errors corrected
        # 2    - File system errors corrected, system should
        #        be rebooted
        # 4    - File system errors left uncorrected
        # 8    - Operational error
        # 16   - Usage or syntax error
        # 32   - E2fsck canceled by user request
        # 128  - Shared library error
        if rc == 1 or rc == 2:
           # rc is a number
           self.log.info("(BlockSD.mountMaster) fsck corrected fs errors (%s)", rc)
        if rc >= 4:
            raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

        # TODO: Remove when upgrade is only from a version which creates ext3
        # Try to add a journal - due to unfortunate circumstances we exposed
        # to the public the code that created ext2 file system instead of ext3.
        # In order to make up for it we are trying to add journal here, just
        # to be sure (and we have fixed the file system creation).
        # If there is a journal already tune2fs will do nothing, indicating this
        # condition only with exit code. However, we do not really care.
        cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
        misc.execCmd(cmd)

        cmd = [constants.EXT_MOUNT, masterfsdev, masterdir]
        (rc, out, err) = misc.execCmd(cmd)
        # mount exit codes
        # mount has the following return codes (the bits can be ORed):
        # 0      success
        # 1      incorrect invocation or permissions
        # 2      system error (out of memory, cannot fork, no more loop devices)
        # 4      internal mount bug or missing nfs support in mount
        # 8      user interrupt
        # 16     problems writing or locking /etc/mtab
        # 32     mount failure
        # 64     some mount succeeded
        if rc != 0:
            raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

        cmd = [constants.EXT_CHOWN, vg.USER_GROUP, masterdir]
        (rc, out, err) = misc.execCmd(cmd)
        if rc != 0:
            # chown failure is logged but not fatal
            self.log.error("(BlockSD.mountMaster) failed to chown %s",
                masterdir)

    @classmethod
    def __handleStuckUmount(cls, masterdir):
        """
        Detect a umount process stuck on `masterdir' and try to unstick it,
        as a last resort by disconnecting the underlying storage.
        """
        umountPids = misc.pgrep("umount")
        for umountPid in umountPids:
            try:
                state = misc.pidStat(umountPid)[2].lower()
                mountPoint = misc.getPidCmdline(umountPid).split('\0')[-2]
            except:
                # Process probably exited
                continue

            if mountPoint != masterdir:
                continue

            if state != "d":
                # If the umount is not in d state there
                # is a possiblity that the world might
                # be in flux and umount will get stuck
                # in an unkillable state that is not D
                # which I don't know about, perhaps a
                # bug in umount will cause umount to
                # wait for something unrelated that is
                # not the syscall. Waiting on a process
                # which is not your child is race prone
                # I will just call for another umount
                # and wait for it to finish. That way I
                # know that a umount ended.
                try:
                    ros.umount(masterdir, True)
                    return
                except:
                    # timeout! we are stuck again.
                    # if you are here spmprotect forgot to
                    # reboot the machine but in any case
                    # continue with the disconnection.
                    pass

            cls.log.warn("umount stuck, trying to snap it out of it by disconnecting storage")
            try:
                vgName = masterdir.rsplit("/", 2)[1]
                masterDev = os.path.join("/dev/mapper", vgName.replace("-", "--") + "-" + MASTERLV)
            # NOTE(review): rsplit/indexing raise IndexError, not KeyError -
            # this handler looks unreachable
            except KeyError:
                # Umount succeeded after all
                return

            cls.log.debug("master mount resource is `%s`, trying to disconnect underlying storage", masterDev)
            iscsi.disconnectFromUndelyingStorage(masterDev)
            return

    @classmethod
    def doUnmountMaster(cls, masterdir):
        """
        Unmount the master metadata file system. Should be called only by SPM.
        """
        # fuser processes holding mount point and validate that the umount succeeded
        cls.__handleStuckUmount(masterdir)
        if os.path.ismount(masterdir):
            # Try umount, take 1
            try:
                ros.umount(masterdir, True)
            except:
                cls.log.warn("umount failed because of an exception trying to handle it anyway", exc_info=True)

            if os.path.ismount(masterdir):
                # umount failed, try to kill that processes holding mount point
                fuser_cmd = [constants.EXT_FUSER, "-m", masterdir]
                (rc, out, err) = misc.execCmd(fuser_cmd)

                # It was unmounted while I was checking no need to do anything
                if not os.path.ismount(masterdir):
                    return
                cls.log.warn(out)
                # out[0] presumably holds the fuser stdout line with the pids
                # - TODO confirm misc.execCmd's return shape
                for match in out[0].split():
                    try:
                        pid = int(match)
                    except ValueError:
                        # Match can be "kernel"
                        continue

                    try:
                        cls.log.debug("Trying to kill pid %d", pid)
                        os.kill(pid, signal.SIGKILL)
                    except OSError, e:
                        if e.errno == errno.ESRCH: # No such process
                            pass
                        elif e.errno == errno.EPERM: # Operation not permitted
                            cls.log.warn("Could not kill pid %d because operation was not permitted", pid)
                        else:
                            # NOTE(review): "%d" has no matching argument here -
                            # the logging call itself will fail to format
                            cls.log.warn("Could not kill pid %d because an unexpected error", exc_info = True)
                    except:
                        # NOTE(review): same missing "%d" argument as above
                        cls.log.warn("Could not kill pid %d because an unexpected error", exc_info = True)

                # Try umount, take 2
                try:
                    ros.umount(masterdir, True)
                except:
                    cls.log.warn("umount failed because of an exception trying to handle it anyway", exc_info=True)
                if os.path.ismount(masterdir):
                    # We failed to umount masterFS
                    # Forcibly rebooting the SPM host would be safer. ???
                    raise se.StorageDomainMasterUnmountError(masterdir, rc)

    def unmountMaster(self):
        """
        Unmount the master metadata file system. Should be called only by SPM.
        """
        masterdir = os.path.join(self.domaindir, sd.MASTER_DIR_NAME)
        self.doUnmountMaster(masterdir)
        # It is time to deactivate the master LV now
        self.vg.deactivateLV(MASTERLV)


    def refreshDirTree(self):
        """
        (Re)create the domain directory tree and the symlinks pointing at
        the special LV device nodes.
        """
        # create domain images folder
        imagesPath = os.path.join(self.domaindir, sd.DOMAIN_IMAGES)
        if not os.path.exists(imagesPath):
            os.makedirs(imagesPath)

        # create domain special volumes folder
        dom_md = os.path.join(self.domaindir, sd.DOMAIN_META_DATA)
        if not os.path.exists(dom_md):
            os.makedirs(dom_md)

        # Missing "ids" device node means the special LVs are inactive
        if not os.path.exists(os.path.join("/dev", self.sdUUID, sd.IDS)):
            self.refreshSpecialVolumes()

        if not os.path.lexists(os.path.join(dom_md, sd.METADATA)):
            os.symlink(self.metavol, os.path.join(dom_md, sd.METADATA))

        if not os.path.lexists(os.path.join(dom_md, sd.LEASES)):
            os.symlink(os.path.join("/dev", self.sdUUID, sd.LEASES),
                os.path.join(dom_md, sd.LEASES))

        if not os.path.lexists(os.path.join(dom_md, sd.IDS)):
            os.symlink(os.path.join("/dev", self.sdUUID, sd.IDS),
                os.path.join(dom_md, sd.IDS))

        if not os.path.lexists(os.path.join(dom_md, sd.INBOX)):
            os.symlink(os.path.join("/dev", self.sdUUID, sd.INBOX),
                os.path.join(dom_md, sd.INBOX))

        if not os.path.lexists(os.path.join(dom_md, sd.OUTBOX)):
            os.symlink(os.path.join("/dev", self.sdUUID, sd.OUTBOX),
                os.path.join(dom_md, sd.OUTBOX))

        if not os.path.lexists(os.path.join(dom_md, MASTERLV)):
            os.symlink(os.path.join("/dev", self.sdUUID, MASTERLV),
                os.path.join(dom_md, MASTERLV))

        # create special imageUUID for ISO/Floppy volumes
        iso_imagesPath = os.path.join(imagesPath, sd.ISO_IMAGE_UUID)
        if self.isISO() and not os.path.exists(iso_imagesPath):
            os.mkdir(iso_imagesPath)

    def refreshSpecialVolumes(self):
        """
        Activate all the domain's special LVs read-write.
        """
        self.vg.activateLVs([sd.METADATA, sd.LEASES, sd.IDS, sd.INBOX, sd.OUTBOX, MASTERLV], rw=True)

    def extendVolume(self, volumeUUID, size, isShuttingDown=None):
        """
        Extend a volume LV; serialized with extend() via _extendlock.
        """
        self._extendlock.acquire()
        try:
            self.vg.extendLV(volumeUUID, size) #, isShuttingDown) # FIXME

        finally:
            self._extendlock.release()

    def refresh(self):
        """
        Refresh the special LVs, directory tree and the underlying VG state.
        """
        self.refreshSpecialVolumes()
        self.refreshDirTree()
        self.vg.refresh()


def _createVMSfs(dev):
    """
    Create a special (journaled) file system to store VM data.

    :param dev: block device path to format

    NOTE(review): the mkfs exit code is ignored - a failure is not
    reported to the caller.
    """
    cmd = [constants.EXT_MKFS, "-q", "-j", dev]
    # Previously the return code was captured into an unused local; just
    # run the command.
    misc.execCmd(cmd)


def _removeVMSfs(dev):
    """
    Destroy special VM data file system.

    Currently a no-op; callers remove the underlying LV afterwards, which
    discards the data anyway.
    """
    # XXX Add at least minimal sanity check:. i.e. fs not mounted
    pass


def _refreshVGPerms(vgroup):
    """
    Refresh the permissions on the special Block Storage Domain volumes.

    Chowns the VG device directory and every special volume under it to
    the vdsm user/group; raises VolumeGroupPermissionsError on failure.
    """
    vgdir = os.path.join("/dev", vgroup.name)
    special = (sd.METADATA, sd.LEASES, sd.IDS, sd.INBOX, sd.OUTBOX)
    targets = [vgdir]
    for name in special:
        targets.append(os.path.join(vgdir, name))
    cmd = [constants.EXT_CHOWN, vg.USER_GROUP] + targets
    if misc.execCmd(cmd)[0] != 0:
        raise se.VolumeGroupPermissionsError(vgroup.name)

def getBlockStorageDomainList():
    """
    Collect all the actual BlockSD and remove all the stale ones (if any)
    """
    # First get the list of all the domains we think there are
    stale = glob.glob(os.path.join(BlockStorageDomain.mountpoint, "*"))
    try:
        # Now get the actual BlockSD list
        vgl = vg.getVGList(sdonly=True)
    except Exception, e:
        log.warning("getBlockStorageDomainList: Can't get VG list (%s) " % (str(e)))
        misc.logException(e)
        # Nothing to do without vglist
        return []

    dl = []

    for v in vgl:
        try:
            dl.append(BlockStorageDomain(v.name, v))
        except Exception, e:
            log.warning("getBlockStorageDomainList: Can't create domain from VG=%s (%s) " % (v.name, str(e)))
            misc.logException(e)

    try:
        for i in dl:
            # Remove real domain from the stale list
            if i.domaindir in stale:
                stale.remove(i.domaindir)
            i.refreshDirTree()
        # Finally clean up all the stale leftovers
        for i in stale:
            misc.cleanupdir(i)
    except Exception, e:
        log.warning("getBlockStorageDomainList: Can't remove stale leftovers (%s) " % (str(e)))
        misc.logException(e)

    return dl
