#!/usr/bin/env python2

import os
import errno
import re
import ConfigParser
import json
import socket
import time
import mmap
import shutil
import sys
import pprint
import traceback
from StringIO import StringIO

from disk import Disk
from raid import RAID
from cache_manage import CommonCache
from mega_raid import MegaRAID
from pool_manage import PoolManage
from utils import Exp, _dmsg, _dwarn, _derror, _human_readable, _human_unreadable, _exec_pipe1, _exec_system, \
        _str2dict, _syserror, _syswarn, _lock_file1, _unlock_file1, _is_docker, ensure_path, _exec_shell1, _exec_pipe, list_equal
from bcache import Bcache
from names import DevName, DiskName
from lsblk import LSBLK
from db_sqlite3 import Sqlite

from tlog import tlog

"""
:support normal and NVMe devices
:support meta | wlog partition
:support bcache

meta:
    must be mounted before use (lichd, etcd etc. services)
    /etc/fstab: mount -o dirsync

SPDK:
    load_search both nvme and pci

"""

# maximum tier index supported per disk -- TODO confirm against tier users
DISK_MAX_TIER = 1
# highest disk number that may be allocated (numbers are 0..DISK_MAX_NUM)
DISK_MAX_NUM = 255

# size of the bcache super block prepended to a backing device; lich meta
# on such devices sits after this header
BCACHE_HEADER_LEN = 8192

# struct ext4_super_block offset = 1024; ext4_super_block.s_reserved offset = 588;
# lich meta is stashed inside the ext4 superblock's reserved area
VALID_DATA_OFFSET = 1024 + 588
# sizeof(ext4_super_block.s_reserved) = 436
VALID_DATA_LEN = 436

# offset on the device where the data/disk/info snapshot is mirrored
DISK_INFO_OFFSET = 4096
FILE_BLOCK_LEN = 4096

# sentinel cache-set uuids: all-zero vs. completely absent
ZERO_CSET_UUID = '00000000-0000-0000-0000-000000000000'
INVALID_CSET_UUID = ''


class BcacheDev(object):
    """Read/write lich metadata embedded in devices and inspect bcache state.

    The metadata string lives inside the ext4 superblock's reserved area
    (VALID_DATA_OFFSET); on a raw backing device wrapped by bcache the
    bcache super block (BCACHE_HEADER_LEN) must be skipped first.
    """

    def __init__(self, config):
        self.config = config

        # sysfs root where registered bcache cache sets appear
        self.sys_fs = '/sys/fs/bcache'

        self.clusteruuid = self.config.getclusteruuid()
        self.hostname = self.config.hostname

    def disk_relation_recover(self):
        """Recreate missing /dev/bcacheN nodes.

        Looks up the bcache major number in /proc/devices and mknods any
        bcacheN that exists under /sys/block but not under /dev.
        """
        block_list = os.listdir("/sys/block")

        # bugfix-adjacent cleanup: the previous `except Exp, e: raise` was a
        # no-op handler; let errors propagate naturally
        cmd = "cat /proc/devices | grep ' bcache' | awk '{print $1}'"
        out, err = _exec_shell1(cmd, p=False)
        bdev_num = out.strip()

        for dev in block_list:
            if dev.startswith("bcache"):
                bdev = "/dev/" + dev
                if not os.path.exists(bdev):
                    # bdev[11:] is the N of "/dev/bcacheN", used as the minor number
                    cmd = "mknod %s b %s %s" % (bdev, bdev_num, bdev[11:])
                    _exec_shell1(cmd, p=True)

    def read_meta(self, dev_or_path, match_cluster_and_node=True, bcache_header=False):
        """Read and parse the lich meta string from a device or file.

        :todo /opt/fusionstack/data/disk/block/%.block

        :param dev_or_path: device node or meta file to read
        :param match_cluster_and_node: when True, discard meta belonging to
            another cluster or node
        :param bcache_header: skip the bcache super block before the meta
        :return: (meta dict or None, raw buffer); ('', None-meta) on read error
        """
        meta = None

        # TODO non-bcache config
        # if DevName(dev_or_path).is_bcache():
        #     bcache_header = False
        # else:
        #     bcache_header = True

        offset = VALID_DATA_OFFSET
        if bcache_header:
            offset += BCACHE_HEADER_LEN

        try:
            fd = open(dev_or_path, 'rb')
            fd.seek(offset, 0)
            buff = fd.read(VALID_DATA_LEN)
            fd.close()
        except:
            # best-effort: unreadable devices simply report "no meta"
            # traceback.print_exc()
            return None, ''

        m = re.match(r'cluster=(\S+);node=(\S+);type=(\w+);disk=(\d+);pool=(\S+);cache=(\d+);cached=(\d+);', buff)
        if m:
            if match_cluster_and_node and not (m.group(1) == self.clusteruuid and m.group(2) == self.hostname):
                meta = None
            else:
                meta = {
                    'cluster': m.group(1),
                    'node': m.group(2),
                    'type': m.group(3),
                    'disk': int(m.group(4)),
                    'pool': m.group(5),
                    'cache': int(m.group(6)),
                    'cached': int(m.group(7))
                }

                # cset is optional; absent means "no cache set attached"
                m = re.match(r'.*cset=(\S+);', buff)
                if m:
                    meta['cset'] = m.group(1)
                else:
                    meta['cset'] = INVALID_CSET_UUID
        else:
            # short form written for meta/wlog devices (no disk/pool/cache keys)
            m = re.match(r'cluster=(\S+);node=(\S+);type=(\w+);', buff)
            if m:
                meta = {
                    'cluster': m.group(1),
                    'node': m.group(2),
                    'type': m.group(3),
                }

        return meta, buff

    def read_meta2(self, dev_or_path):
        """Debug helper: print the raw meta fields of this cluster/node.

        Bugfix: previously crashed with AttributeError on m.group() when the
        pattern did not match; now returns silently in that case.
        """
        fd = open(dev_or_path, 'rb')
        fd.seek(VALID_DATA_OFFSET, 0)
        buff = fd.read(VALID_DATA_LEN)
        fd.close()

        m = re.match(r'cluster=%s;node=%s;type=(\w+);(disk=(\d+);pool=([^;]+);)?' % (
            self.clusteruuid, self.hostname), buff)
        if m is None:
            return

        print(m.group(1))
        print(m.group(2))
        print(m.group(3))
        print(m.group(4))

    def write_meta(self, dev_or_path, buff_or_dict, bcache_header=False):
        """Serialize (if needed) and write the meta string to the device.

        :param buff_or_dict: either a pre-formatted meta string or a full
            meta dict (must contain all long-form keys including 'cset')
        :param bcache_header: skip the bcache super block before writing
        """
        if isinstance(buff_or_dict, dict):
            buff = 'cluster=%s;node=%s;type=%s;disk=%d;pool=%s;cache=%d;cached=%d;cset=%s;\0' % (
                buff_or_dict['cluster'],
                buff_or_dict['node'],
                buff_or_dict['type'],
                buff_or_dict['disk'],
                buff_or_dict['pool'],
                buff_or_dict['cache'],
                buff_or_dict['cached'],
                buff_or_dict['cset'])
        else:
            buff = buff_or_dict

        offset = VALID_DATA_OFFSET
        if bcache_header:
            offset += BCACHE_HEADER_LEN

        _dmsg('write_meta: path %s, meta %s, offset:%d\n' % (dev_or_path, buff, int(offset)))

        fd = open(dev_or_path, 'wb')
        fd.seek(offset, 0)
        fd.write(buff)
        fd.flush()
        fd.close()

    def is_bcache_dev(self, dev):
        """Return True when dev is a /dev/bcacheN style device."""
        return DevName(dev).is_bcache()

    def clean_meta(self, dev):
        """Wipe the first 1MB of a bcache device to destroy its metadata."""
        if self.is_bcache_dev(dev):
            cmd = 'dd if=/dev/zero of=%s bs=1M count=1' % dev
            try:
                (out_msg, err_msg) = _exec_pipe1(cmd, 0, False)
            except Exp as e:
                # bugfix: err_msg is unbound when _exec_pipe1 raises before
                # returning; report the exception itself instead
                _syswarn(" cmd, %s" % e)

    def update_meta(self, dev):
        """Promote a freshly-formatted device: type 'new' becomes 'data'."""
        if self.is_bcache_dev(dev):
            bcache_header = False
        else:
            bcache_header = True

        meta, buff = self.read_meta(dev, bcache_header=bcache_header)
        if meta:
            if meta['type'] == 'new':
                meta['type'] = 'data'
                self.write_meta(dev, meta, bcache_header=bcache_header)

    def __parse_sd(self, path, n=-2):
        # resolve a sysfs symlink and pick the n-th path component
        # (default -2: the block device name, e.g. .../sdb/sdb1 -> sdb)
        rpath = os.path.realpath(path)
        l = rpath.split('/')
        return l[n]

    def backing_device_by_bcache(self, dev):
        """Map /dev/bcacheN to its backing device name; pass through others."""
        dname = DevName(dev)
        if dname.is_bcache():
            path = '/sys/block/%s/bcache/dev/bcache' % dname.name
            return self.__parse_sd(path)
        else:
            return dname.dev

    def backing_device_by_cset(self, cset_uuid, verbose=False):
        """List (sorted) backing device names attached to a cache set.

        :param verbose: additionally report dangling bdev* symlinks
        """
        path = os.path.join('/sys/fs/bcache', cset_uuid)
        if os.path.isdir(path):
            files = os.listdir(path)
        else:
            files = []

        if verbose:
            for i in files:
                if i.startswith('bdev') and len(i) > len('bdev'):
                    rpath = os.path.join(path, i)
                    if os.path.islink(rpath):
                        if not os.path.exists(os.path.realpath(rpath)):
                            _derror('  invalid link %s' % os.path.realpath(rpath))

        bdevs = []
        for i in files:
            if i.startswith('bdev') and len(i) > len('bdev'):
                sd = self.__parse_sd(os.path.join(path, i))
                bdevs.append(sd)

        bdevs.sort()
        return bdevs

    def cache_devcie_by_cset(self, cset_uuid):
        # NOTE: "devcie" typo kept -- the name is part of the public interface
        """Return the cache device name (cache0) of a cache set."""
        return self.__parse_sd(os.path.join(self.sys_fs, cset_uuid, 'cache0'))

    def is_registered(self, dev):
        """Return True when dev is registered with the bcache subsystem."""
        dname = DevName(dev)
        path = '/sys/block/%s/bcache' % dname.name
        return os.path.isdir(path)

    def cset_list(self, verbose=False):
        """Return {cset_uuid: [backing device names]} for all cache sets."""
        d = {}

        if not os.path.isdir(self.sys_fs):
            return d

        for uuid in os.listdir(self.sys_fs):
            if not os.path.isdir(os.path.join(self.sys_fs, uuid)):
                continue

            bdevs = self.backing_device_by_cset(uuid, verbose)
            d[uuid] = bdevs

        return d


class LichDisk(object):
    """Bookkeeping for the per-disk record files under <home>/data/disk/."""

    def __init__(self, config):
        self.config = config

        # every per-disk file kind managed under data/disk/<kind>/
        self.disk_file_list = ['disk', 'block', 'info', 'bitmap', 'tier', 'speed', 'rotation']

    def __is_num_used(self, disk_num):
        # a disk number is taken if any of its primary record files exist
        return (self.is_disk_exists(disk_num)
                or self.is_bitmap_exists(disk_num)
                or self.is_info_exists(disk_num))

    def get_next_disk_num(self):
        """Return the smallest unused disk number.

        :raises Exp: EPERM when all numbers up to DISK_MAX_NUM are taken
        """
        for disk_num in range(DISK_MAX_NUM + 1):
            if not self.__is_num_used(disk_num):
                return disk_num

        raise Exp(errno.EPERM, 'There is no number left')

    def get_max_disk_num(self):
        """Return the largest disk number in use (0 when none exists).

        Bugfix: the scan previously started at DISK_MAX_NUM - 1 and could
        never report disk number DISK_MAX_NUM, even though
        get_next_disk_num() may allocate it.
        """
        for disk_num in range(DISK_MAX_NUM, -1, -1):
            if self.__is_num_used(disk_num):
                break

        return disk_num

    def get_file_ctime(self, disk_num):
        """Return the creation time (epoch seconds) of the disk link file."""
        path = self.get_path(disk_num, 'disk')
        return int(os.path.getctime(path))

    def get_path(self, disk_or_disk_num, which):
        """Return the path of a per-disk file, e.g. .../data/disk/info/3.info."""
        assert which in self.disk_file_list

        disk_num = DiskName(disk_or_disk_num).num
        return os.path.join(self.config.home, 'data/disk', which, '%s.%s' % (disk_num, which))

    def nvme_prefix(self):
        """Return the directory holding SPDK/NVMe pci_* marker files."""
        return os.path.join(self.config.home, 'data/disk/nvme')

    @staticmethod
    def nvme_device(pool, disk_num, dev):
        """Build the marker file name 'pci_<pool>_disk<N>_<dev>'."""
        return "pci_" + pool + '_disk' + str(disk_num) + '_' + dev

    def __is_file_exists(self, disk_num, which):
        path = self.get_path(disk_num, which)
        return os.path.exists(path)

    def is_disk_exists(self, disk_num):
        return self.__is_file_exists(disk_num, "disk")

    def is_info_exists(self, disk_num):
        return self.__is_file_exists(disk_num, "info")

    def is_block_exists(self, disk_num):
        return self.__is_file_exists(disk_num, "block")

    def is_bitmap_exists(self, disk_num):
        return self.__is_file_exists(disk_num, "bitmap")

    def unlink(self, disk):
        """Remove every record file of a disk, including the pci_* target of
        the 'disk' symlink when present."""
        print(disk)
        for x in self.disk_file_list:
            path = self.get_path(disk, x)
            if os.path.exists(path):
                if x == 'disk':
                    rpath = os.path.realpath(path)
                    if os.path.isfile(rpath) and os.path.basename(rpath).startswith('pci_'):
                        _derror('  %s' % rpath)
                        os.unlink(rpath)

                _derror('  %s' % path)
                os.unlink(path)


class UIODevice(object):
    """Bind/unbind a PCI device to a userspace I/O driver via sysfs."""

    def __init__(self, pci, driver='uio_pci_generic'):
        self.pci = pci
        self.driver = driver

    def is_binding(self):
        """Return True when the device's driver symlink points at self.driver."""
        link = '/sys/bus/pci/devices/%s/driver' % self.pci
        if not os.path.islink(link):
            return False
        target = os.readlink(link)
        return os.path.basename(target) == self.driver

    def get_new_id(self):
        """Return the 'vendor device' id pair of this slot, from `lspci -n`."""
        out, err = _exec_pipe1(['lspci', '-n', '-s', self.pci], retry=1, p=False)
        vendor_device = out.split()[2]
        return vendor_device.replace(':', ' ')

    def new_id(self):
        """Register this device's id with the driver's new_id interface."""
        _exec_system("echo '%s' > /sys/bus/pci/drivers/%s/new_id" % (self.get_new_id(), self.driver))

    def bind(self):
        """Attach the device to the driver, loading the module on demand."""
        if not self.is_binding():
            _exec_system('modprobe uio_pci_generic')

            self.new_id()
            time.sleep(1)

        if not self.is_binding():
            _exec_system("echo '%s' > /sys/bus/pci/drivers/%s/bind" % (self.pci, self.driver))

    def unbind(self):
        """Detach the device from the driver when currently bound."""
        if self.is_binding():
            _exec_system("echo '%s' > /sys/bus/pci/drivers/%s/unbind" % (self.pci, self.driver))


class DiskManage(object):
    def __init__(self, node):
        """Bind the disk manager to a node; heavy helpers (RAID probe,
        disk search results) are created lazily on first use."""
        self.node = node

        self.config = node.config
        self.cacheconf = None

        # per-kind record directories under <home>/data/disk/
        self.disk_home = os.path.join(self.config.home, 'data/disk/disk')
        self.tier_path = os.path.join(self.config.home, 'data/disk/tier')
        self.speed_path = os.path.join(self.config.home, 'data/disk/speed')
        self.rotation_path = os.path.join(self.config.home, 'data/disk/rotation')

        self.tier_withtype = False
        # cached result of __disk_load_search(); None until first lookup
        self.disk_search = None

        self.pool_manage = PoolManage(self.config)

        # RAID helper is created lazily where needed (see __get_disk_rotation)
        self.raid = None
        self.disk = Disk()

        self.lichDisk = LichDisk(self.config)

        self.commoncache = CommonCache(node)
        self.bdev = BcacheDev(self.config)

        self.wait_for_device_interval = 60

    def ssd_models(self):
        """Return the SSD model substrings listed in etc/ssd.models.

        Blank lines and '#' comments are skipped; an absent file yields [].
        """
        path = os.path.join(self.config.home, 'etc/ssd.models')
        if not os.path.exists(path):
            return []

        models = []
        # use open() instead of the deprecated/non-portable file() builtin,
        # and iterate the handle directly instead of readlines()
        with open(path, 'r') as fd:
            for line in fd:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                models.append(line)

        return models

    def parse_cacheconf(self, cacheconf, devs=None):
        """Expand a cache config keyed by class ('ssd'/'hdd'/'diskN'/'sysdev')
        into a config keyed by concrete device, and mark RAID-backed SSDs.

        :param cacheconf: mapping of class/disk names to cache settings
        :param devs: extra device paths to treat as lich disks
        :return: {device_or_class: settings}; empty in testing mode
        """
        cache = {}

        if (self.config.testing):
            return cache

        lich_disk = self.__get_lich_disk()
        if devs is not None:
            # synthetic entries so plain device paths resolve like lich disks
            lich_disk.extend([('diskx', x, None) for x in devs])
        sys_dev = self.disk.get_sys_dev()
        for dev in cacheconf:
            if dev == 'ssd' or dev == 'hdd':
                cache[dev] = cacheconf[dev]
            elif dev.startswith('disk'):
                # 'diskN' keys map to that disk's device path
                for item in lich_disk:
                    if item[0] == dev:
                        cache[item[1]] = cacheconf[dev]
            elif dev == 'sysdev':
                for item in sys_dev:
                    cache[item] = cacheconf[dev]

        models = self.ssd_models()
        if not self.raid:
            self.raid = RAID(self.config)
            # NOTE(review): the HPRAID early-return only triggers when the
            # RAID helper is probed here for the first time -- confirm intended
            if self.raid.raid_type == 'HPRAID':
                return cache

        for (disk,dev,pool) in lich_disk:
            if disk.startswith('disk'):
                dev_type = 'UNKNOW'
                try:
                    dev_type = self.disk.get_dev_type(dev)

                    if dev_type != 'RAID':
                        continue

                    # RAID-backed SSDs (model listed in etc/ssd.models) get
                    # raid cache disabled / disk cache enabled
                    dev = self.disk.get_dev(dev)
                    model = self.raid.disk_model(dev)
                    if any(m in model for m in models):
                        if dev not in cache and not cache['ssd']['skip']:
                            cache[dev] = {}
                            cache[dev]['raid_cache'] = 'disable'
                            cache[dev]['disk_cache'] = 'enable'
                except:
                    # best-effort per disk: probing failures skip the entry
                    continue

        cache['spdk'] = {'skip':True}
        cache['bcache'] = {'skip':True}
        return cache

    def __get_disk_tiermask(self, disk_speed):
        """Return the number of decimal digits of disk_speed (0 for 0)."""
        digits = 0
        value = disk_speed
        while value:
            value //= 10
            digits += 1

        return digits

    def __get_disk_tiermask_withspeed(self, lich_speed):
        """Group disks into speed masks.

        Disks are walked in ascending speed order; a new mask level starts
        whenever a disk is more than twice as fast as the previous one.
        Returns {disk: mask}.
        """
        masks = {}
        ranked = sorted(lich_speed.items(), key=lambda item: item[1])

        mask = 0
        prev_speed = None
        for disk, speed in ranked:
            if prev_speed is not None and speed > 2 * prev_speed:
                mask += 1
            masks[disk] = mask
            prev_speed = speed

        return masks

    def __get_disk_tier_withmask(self, lich_tier):
        """Invert tier values in place so the highest tier becomes 0."""
        peak = max(lich_tier.values()) if lich_tier else 0

        for disk in lich_tier:
            lich_tier[disk] = peak - lich_tier[disk]

        return lich_tier

    def __get_disk_tiermask_withrotation(self, lich_tier, lich_rotation):
        """Re-rank tiers by rotation speed.

        Each disk's tier becomes the rank of its rotation value among all
        distinct rotation values (ascending).  Mutates and returns lich_tier.
        """
        levels = sorted(set(lich_rotation.values()))

        for disk in lich_tier:
            lich_tier[disk] = levels.index(lich_rotation[disk])

        return lich_tier

    def __get_lich_info(self, path):
        """Parse '<num>.<kind>' files under path into {disk_num: int_value}.

        Files whose content is not an integer are silently skipped; a
        missing directory yields an empty dict.
        """
        lich_info = {}
        if not os.path.exists(path):
            return lich_info

        for info in os.listdir(path):
            disk_num = int(info.split('.')[0])
            with open(os.path.join(path, info)) as fp:
                value = fp.read().strip('\n').strip('\0')
            try:
                lich_info[disk_num] = int(value)
            except ValueError:
                pass

        return lich_info

    def __get_pool_info(self, path, pool):
        """Like __get_lich_info, restricted to the disks belonging to pool."""
        lich_info = self.__get_lich_info(path)
        pool_disk = self.get_pool_disk(pool)
        return dict((i, lich_info[i]) for i in pool_disk if i in lich_info)

    def __get_lich_tier(self):
        # tier values of every known disk (from data/disk/tier files)
        return self.__get_lich_info(self.tier_path)

    def __get_pool_tier(self, pool):
        # tier values restricted to the disks of one pool
        return self.__get_pool_info(self.tier_path, pool)

    def __get_lich_writeback_cache(self, dev):
        """Return the 'cache' value from the device's meta, or 0 when the
        meta is absent/unreadable."""
        meta, buff = self.bdev.read_meta(dev)
        return meta['cache'] if meta else 0

    def __get_lich_writeback_fromdisk(self):
        """Collect {disk_num: {'type', 'cache', 'cached'}} by reading the
        meta of every linked device; failures skip the remainder of that
        entry (possibly leaving it partially filled, as before)."""
        lich_writeback = {}

        for num, dev in self.__get_lich_link().items():
            try:
                meta, _buff = self.bdev.read_meta(dev)

                entry = lich_writeback[num] = {}
                if meta:
                    entry['type'] = meta['type']
                    entry['cache'] = meta['cache']
                    entry['cached'] = meta['cached']
                else:
                    entry['type'] = 'UNKNOW'
                    entry['cache'] = 0
                    entry['cached'] = 0
            except:
                continue

        return lich_writeback

    def __get_lich_writeback_fromdata(self):
        """As __get_lich_writeback_fromdisk, but reads the meta copies kept
        under data/disk/block instead of the live devices."""
        lich_writeback = {}

        block_path = os.path.join(self.config.home, 'data/disk/block')
        if not os.path.exists(block_path):
            return lich_writeback

        for disk in os.listdir(block_path):
            num = int(disk.split('.')[0])
            meta, _buff = self.bdev.read_meta(os.path.join(block_path, disk))

            if meta:
                lich_writeback[num] = {
                    'type': meta['type'],
                    'cache': meta['cache'],
                    'cached': meta['cached'],
                }
            else:
                lich_writeback[num] = {'type': 'UNKNOW', 'cache': 0, 'cached': 0}

        return lich_writeback

    def __get_lich_writeback(self):
        # delegate to the device-based reader (data/disk/block variant unused here)
        return self.__get_lich_writeback_fromdisk()

    def __check_lich_speed(self):
        """Measure and persist per-disk speed files; drop stale ones.

        Disks whose meta type is not 'data' or whose cache value is 100 are
        skipped.  Speed files of disks no longer linked are removed.
        """
        lich_link = self.__get_lich_link()
        lich_writeback = self.__get_lich_writeback_fromdisk()

        for i in lich_link:
            # NOTE(review): cache == 100 appears to mean "pure cache disk" -- confirm
            if lich_writeback[i]['type'] != 'data' or lich_writeback[i]['cache'] == 100:
                continue

            path = os.path.join(self.speed_path, str(i) + '.speed')
            if not os.path.exists(path):
                print "get %s speed start..." % lich_link[i]
                disk_speed = self.disk.get_dev_speed(lich_link[i])
                print "get %s speed %d" %(lich_link[i], disk_speed)
                _exec_system("echo %d > %s" %(disk_speed, path), True)

        # garbage-collect speed files of disks that are no longer linked
        for info in os.listdir(self.speed_path):
            disk_num = int(info.split('.')[0])
            if disk_num not in lich_link:
                _exec_system("rm -rf " + os.path.join(self.speed_path, info), True)

    def __get_lich_speed(self):
        # measured speed values of every known disk (data/disk/speed files)
        return self.__get_lich_info(self.speed_path)

    def __get_pool_speed(self, pool):
        # speed values restricted to the disks of one pool
        return self.__get_pool_info(self.speed_path, pool)

    def __check_lich_rotation(self):
        """Probe and persist per-disk rotation files; drop stale ones.

        Mirrors __check_lich_speed: only 'data' disks with cache != 100 are
        probed, and rotation files of unlinked disks are removed.
        """
        lich_link = self.__get_lich_link()
        lich_writeback = self.__get_lich_writeback_fromdisk()

        for i in lich_link:
            if lich_writeback[i]['type'] != 'data' or lich_writeback[i]['cache'] == 100:
                continue

            path = os.path.join(self.rotation_path, str(i) + '.rotation')
            if not os.path.exists(path):
                # rotation is None for non-RAID/non-HDD devices; skip those
                disk_rotation = self.__get_disk_rotation(lich_link[i])
                if disk_rotation is not None:
                    _exec_system("echo %d > %s" %(disk_rotation, path), True)

        # garbage-collect rotation files of disks that are no longer linked
        for info in os.listdir(self.rotation_path):
            disk_num = int(info.split('.')[0])
            if disk_num not in lich_link:
                _exec_system("rm -rf " + os.path.join(self.rotation_path, info), True)

    def __get_lich_rotation(self):
        # rotation values of every known disk (data/disk/rotation files)
        return self.__get_lich_info(self.rotation_path)

    def __get_pool_rotation(self, pool):
        # rotation values restricted to the disks of one pool
        return self.__get_pool_info(self.rotation_path, pool)

    def __get_disk_stat(self, lich_disk):
        """Load per-disk runtime stats from <shm>/nodectl/diskstat files,
        keyed by 'disk<num>'.  Missing directories/files are skipped."""
        disk_stat = {}
        diskstat = os.path.join(self.config.shm, 'nodectl/diskstat')
        if not os.path.exists(diskstat):
            return disk_stat

        for disk in lich_disk:
            if not disk[0].startswith('disk'):
                continue

            disk_num = disk[0][4:]
            # stat files are grouped by pool: <diskstat>/<pool>/<num>.stat
            stat_file = os.path.join(diskstat, "%s/%s.stat" % (disk[2], disk_num))
            if not os.path.exists(stat_file):
                continue

            with open(stat_file) as fp:
                res = fp.read()
            disk_stat['disk' + disk_num] = _str2dict(res)

        return disk_stat

    def __get_disk_rotation(self, dev):
        """Return the rotation rate of dev (via the RAID controller for RAID
        devices, via the disk helper for HDDs); None for other types."""
        dev_type = self.disk.get_dev_type(dev)

        if dev_type == 'RAID':
            if not self.raid:
                self.raid = RAID(self.config)
            return self.raid.disk_rotation(self.disk.get_dev(dev))

        if dev_type == 'HDD':
            return self.disk.get_dev_rotation(self.disk.get_dev(dev))

        return None

    def __get_disk_pool(self, disk_num):
        """Map a disk number (string) to its pool name, or None when not found.

        The search result cache is populated on first use.
        """
        if self.disk_search is None:
            self.disk_search = self.__disk_load_search()

        target = 'disk' + disk_num
        return next((entry[2] for entry in self.disk_search if entry[0] == target), None)

    def __get_spdk_pool(self, dev):
        """Split an SPDK name 'pci_<pool>_disk<N>_<addr>' into (pool, addr)."""
        parts = dev.split('_')
        return parts[1], parts[-1]

    def __get_all_pool(self):
        """Return the distinct pool names of all 'disk*' search entries,
        in first-seen order."""
        if self.disk_search is None:
            self.disk_search = self.__disk_load_search()

        all_pool = []
        for entry in self.disk_search:
            if entry[0].startswith('disk') and entry[2] not in all_pool:
                all_pool.append(entry[2])

        return all_pool

    def __get_lich_disk(self, need_convert=True):
        """ get all online lich devices.

        :todo check pool if valid

        :param need_convert: translate cache (fast) devices back to their
            core device via the common cache mapping
        :return: list of (name, device, pool) tuples; name is 'meta',
            'wlog' or 'disk<N>', pool is None for meta/wlog
        """

        lich_disk = []

        # TODO etcd maybe not running
        # pool_list = self.pool_manage.pool_list()

        if not _is_docker():
            # meta/wlog are identified by their mountpoints, not by symlinks
            meta_path = os.path.join(self.config.home, 'data')
            lich_disk.append(('meta', self.disk.get_dev_bymounted(meta_path), None))

            wlog_path = os.path.join(meta_path, 'wlog')
            lich_disk.append(('wlog', self.disk.get_dev_bymounted(wlog_path), None))

        if os.path.exists(self.disk_home):
            for disk in os.listdir(self.disk_home):
                disk_num = disk.split('.')[0]
                disk_path = os.path.join(self.disk_home, disk)
                if os.path.islink(disk_path):
                    disk_dev = os.readlink(disk_path)
                    # skip dangling links (device gone)
                    if not os.path.exists(disk_dev):
                        continue
                    dev_type = self.disk.get_dev_type(disk_dev)
                    if dev_type == 'SPDK':
                        # SPDK links encode the pool in the target name
                        pool, disk_dev = self.__get_spdk_pool(disk_dev)
                    else:
                        if need_convert:
                            # disk_dev[5:] strips the '/dev/' prefix before
                            # comparing against the configured cache type
                            if self.config.cache_enable and disk_dev[5:].startswith(self.config.cache_type):
                                disk_dev = self.commoncache.get_coredev_by_fastdev(disk_dev)

                        pool = self.__get_disk_pool(disk_num)

                    # if pool in pool_list:
                    if pool:
                        lich_disk.append(('disk' + disk_num, disk_dev, pool))

        return lich_disk

    def get_lich_disk(self):
        # public wrapper around the private online-device enumeration
        return self.__get_lich_disk()

    def get_pool_disk(self, pool):
        """Return the numbers (ints) of every online disk belonging to pool."""
        return [int(entry[0][4:])
                for entry in self.__get_lich_disk()
                if entry[0].startswith('disk') and entry[2] == pool]

    def __get_lich_nvme(self, lich_items):
        """Collect the PCI addresses of every NVMe device among lich_items."""
        pci_list = []
        for _name, dev, _pool in lich_items:
            if dev and self.disk.get_dev_type(dev) == 'NVMe':
                pci_list.append(self.disk.get_dev_pci(dev))

        return pci_list

    def __get_lich_dev(self):
        """Return the deduplicated parent devices of all online lich disks,
        in first-seen order."""
        lich_dev = []
        for _name, disk, _pool in self.__get_lich_disk():
            if not disk:
                continue
            dev = self.disk.get_dev(disk)
            if dev not in lich_dev:
                lich_dev.append(dev)

        return lich_dev

    def __get_lich_link(self):
        """Map disk number -> device path for every symlink under
        data/disk/disk whose target is a real device."""
        lich_link = {}
        if not os.path.exists(self.disk_home):
            return lich_link

        for disk in os.listdir(self.disk_home):
            disk_path = os.path.join(self.disk_home, disk)
            if not os.path.islink(disk_path):
                continue

            disk_dev = os.readlink(disk_path)
            if self.disk.is_dev(disk_dev):
                lich_link[int(disk.split('.')[0])] = disk_dev

        return lich_link

    def get_pool_link(self, pool):
        """Return {disk_num: device} for pool, translating cache (fast)
        devices back to their core device when caching is enabled."""
        pool_link = {}
        lich_link = self.__get_lich_link()

        for num in self.get_pool_disk(pool):
            if num not in lich_link:
                continue

            dev = lich_link[num]
            if self.config.cache_enable and self.config.cache_type in dev:
                dev = self.commoncache.get_coredev_by_fastdev(dev)
            pool_link[num] = dev

        return pool_link

    def __disk_set_metawlog(self):
        """Stamp the mounted meta and wlog devices with cluster/node meta
        and set their filesystem labels.  No-op inside docker."""
        clusteruuid = self.config.getclusteruuid()
        hostname = self.config.hostname

        if _is_docker():
            return

        meta_path = os.path.join(self.config.home, 'data')
        if os.path.ismount(meta_path):
            dev = self.disk.get_dev_bymounted(meta_path)

            # 'with' guarantees the device fd is closed even if the write fails
            with open(dev, 'wb') as fd:
                fd.seek(VALID_DATA_OFFSET, 0)
                fd.write('cluster=%s;node=%s;type=meta;' % (clusteruuid, hostname))

            self.disk.set_dev_label(dev, 'lich-meta')

        wlog_path = os.path.join(meta_path, 'wlog')
        if os.path.ismount(wlog_path):
            dev = self.disk.get_dev_bymounted(wlog_path)

            with open(dev, 'wb') as fd:
                fd.seek(VALID_DATA_OFFSET, 0)
                fd.write('cluster=%s;node=%s;type=wlog;' % (clusteruuid, hostname))

            self.disk.set_dev_label(dev, 'lich-wlog')

    def __disk_set_clusteruuid(self, items):
        """Rewrite the cluster uuid in each item's device meta when it
        differs from the configured cluster uuid."""
        clusteruuid = self.config.getclusteruuid()

        for _name, dev, _pool in items:
            if not dev:
                continue

            meta, _buff = self.bdev.read_meta(dev, match_cluster_and_node=False)
            if meta and meta['cluster'] != clusteruuid:
                meta['cluster'] = clusteruuid
                self.bdev.write_meta(dev, meta)

    def __disk_set_nodename(self, items):
        """Rewrite the node name in each item's device meta when it differs
        from the configured hostname."""
        hostname = self.config.hostname

        for _name, dev, _pool in items:
            if not dev:
                continue

            meta, _buff = self.bdev.read_meta(dev, match_cluster_and_node=False)
            if meta and meta['node'] != hostname:
                meta['node'] = hostname
                self.bdev.write_meta(dev, meta)

    def disk_setmeta_cset(self, dev, cset_uuid, bcache_header=False, match_cluster_and_node=False):
        """Persist a new bcache cache-set uuid into dev's meta if changed.

        NOTE(review): read_meta() can return a short-form meta (cluster/node/
        type only) without a 'cset' key, which would raise KeyError here --
        confirm callers only pass long-form data devices.
        """
        meta, _buff = self.bdev.read_meta(dev, match_cluster_and_node, bcache_header)
        if meta:
            if meta['cset'] == cset_uuid:
                return

            meta['cset'] = cset_uuid
            self.bdev.write_meta(dev, meta, bcache_header)

    def disk_set(self, arg):
        """Entry point for 'disk set': rewrite the cluster uuid, the node
        name, both, or the meta/wlog stamps, depending on arg.

        :raises Exp: EINVAL for an unrecognized arg
        """
        lich_disk = self.__get_lich_disk()

        actions = {
            'clusteruuid': lambda: self.__disk_set_clusteruuid(lich_disk),
            'nodename': lambda: self.__disk_set_nodename(lich_disk),
            'all': lambda: (self.__disk_set_clusteruuid(lich_disk),
                            self.__disk_set_nodename(lich_disk)),
            'metawlog': self.__disk_set_metawlog,
        }

        if arg not in actions:
            raise Exp(errno.EINVAL, '%s is invalid argument, use --help for help' % arg)

        actions[arg]()

    def is_valid_lich_device(self, dev, disk_num):
        """Check that the info snapshot stored on dev (at DISK_INFO_OFFSET)
        matches the local data/disk/info file for disk_num."""
        # TODO bugfix #11590 /opt/fusionstack/data NOT mount
        if not self.lichDisk.is_info_exists(disk_num):
            return False

        with open(self.lichDisk.get_path(disk_num, 'info'), 'rb') as fd:
            expected = fd.read()

        with open(dev, 'rb') as fd:
            fd.seek(DISK_INFO_OFFSET, 0)
            actual = fd.read(len(expected))

        return expected == actual

    def __disk_load_valid(self, dev, meta_only=False):
        """
        read disk_num from disk

        :param dev: device path to probe
        :param meta_only: only accept 'meta'/'wlog' typed devices
        :return None | (disk, dev, pool):
        """
        clusteruuid = self.config.getclusteruuid()
        hostname = self.config.hostname
        # hostname may not be resolvable yet at boot; retry briefly
        retry = 0
        while hostname == 'N/A':
            if retry > 10:
                _syserror(" disk_manage, get hostname fail, please check lich.conf or ifconfig")
                raise Exp(errno.EINVAL, 'get hostname fail, please check lich.conf or ifconfig')

            self.config.refresh()
            hostname = self.config.hostname
            time.sleep(1)
            retry += 1

        if self.config.cache_enable:
            if self.config.cache_type in dev:
                # fast (cache) device: must be a valid member of the cache setup
                valid = self.commoncache.is_valid_cachedev(dev)
                if not valid:
                    # TODO when dump json
                    # _dwarn("%s not a valid cache device, pelase check it!" % dev)
                    return None
            else:
                if self.commoncache.is_running_coredev(dev):
                    self.commoncache.get_mappingdev_by_coredev(dev)
                elif self.commoncache.is_running_cachedev(dev):
                    pass

        try:
            # read diskid from /dev/bcacheN
            # tlog.write(ctx='__disk_load_valid', msg='dev %s meta_only %s' % (dev, meta_only))
            meta, buff = self.bdev.read_meta(dev)
            # tlog.write(ctx='__disk_load_valid', msg='dev %s meta_only %s meta %s' % (dev, meta_only, meta))
            if meta:
                if meta['type'] == 'meta' or meta['type'] == 'wlog':
                    return meta['type'], dev, None
                elif meta['type'] == 'data' and not meta_only:
                    disk_num = meta['disk']
                    pool = meta['pool']

                    # data device is only accepted if its on-disk info
                    # snapshot matches our local record
                    if self.is_valid_lich_device(dev, disk_num):
                        return 'disk%d' % disk_num, dev, pool
        except Exception, e:
            traceback.print_exc()

        return None

    def __disk_spdk_valid(self, dev):
        """ i.e, /opt/fusionstack/data/disk/nvme/pci_p1_disk0_0000.08.00.0

        Look for the SPDK marker file naming *dev* under the nvme prefix.

        :param dev: pci address suffix to match
        :return None | (diskid, dev, pool) parsed from the marker file name
        :raises Exp(EINVAL): more than one marker matches *dev*
        """
        prefix = self.lichDisk.nvme_prefix()
        if not os.path.isdir(prefix):
            return None

        res = None
        for entry in os.listdir(prefix):
            if not entry.startswith('pci_') or not entry.endswith(dev):
                continue
            if res is not None:
                # a second marker for the same pci address is a conflict
                raise Exp(errno.EINVAL, '%s repeat with %s' % (entry, res))
            fields = entry.split('_')
            res = (fields[2], dev, fields[1])
        return res

    def __disk_load_search(self):
        """ get all usable lich devices.

        Scans every block device (falling back to its partitions when the
        whole device carries no lich metadata), then the SPDK marker files.

        :return: list of (diskid, dev, pool) tuples
        """
        found = []

        all_parts = self.disk.get_all_parts()
        for dev in all_parts:
            try:
                dev_type = self.disk.get_dev_type(dev)
            except:
                continue
            if dev_type == 'ISCSI':
                continue

            match = self.__disk_load_valid(dev)
            if match:
                found.append(match)
                continue

            # whole device did not match: probe each partition instead
            for part in all_parts[dev]:
                part_match = self.__disk_load_valid(part)
                if part_match:
                    found.append(part_match)

        # SPDK devices are tracked via marker files, not /dev nodes
        for dev in self.disk.get_all_spdk():
            match = self.__disk_spdk_valid(dev)
            tlog.write(ctx='__disk_load_search', msg='dev %s match %s' % (dev, match))
            if match:
                found.append(match)

        return found

    def __disk_dedupe_for_nvme(self, exists, item):
        """
        Reconcile a duplicated disk id when the same NVMe disk is visible both
        as a kernel NVMe device and as an SPDK (pci) device.

        :param exists: dict diskid -> {'id', 'dev', 'pool'} of already-seen disks;
            the 'dev' entry may be rewritten in place
        :param item: [diskid, dev, pool] of the newly-seen duplicate;
            item[1] may be rewritten in place
        :return: True when the duplicate was reconciled, False otherwise
        """
        diskid = item[0]
        diskdev = item[1]
        diskpool = item[2]

        # only an NVMe-vs-SPDK alias pair of the same disk can be deduped:
        # both entries must be NVMe, and exactly one of them must be SPDK
        if not self.disk.is_nvme(exists[diskid]['dev']) or not self.disk.is_nvme(diskdev):
            return False
        if self.disk.is_spdk(exists[diskid]['dev']) and self.disk.is_spdk(diskdev):
            return False
        if not self.disk.is_spdk(exists[diskid]['dev']) and not self.disk.is_spdk(diskdev):
            return False

        if self.config.spdk:
            # SPDK mode: normalize both sides to the pci (SPDK) name
            if not self.disk.is_spdk(exists[diskid]['dev']):
                pci = self.__nvme2spdk(exists[diskid]['dev'])
                if pci != diskdev:
                    return False
                exists[diskid]['dev'] = pci
            else:
                pci = self.__nvme2spdk(diskdev)
                if pci != exists[diskid]['dev']:
                    return False
                item[1] = pci
            return True
        else:
            # non-SPDK mode: drop the SPDK registration and keep the /dev name
            if self.disk.is_spdk(exists[diskid]['dev']):
                self.__disk_del_spdk(exists[diskid]['dev'], exists[diskid]['id'], exists[diskid]['pool'])
                try:
                    pci = self.__spdk2nvme(exists[diskid]['dev'])
                    if pci != diskdev:
                        return False
                    exists[diskid]['dev'] = pci
                except:
                    exists[diskid]['dev'] = diskdev
            elif self.disk.is_spdk(diskdev):
                self.__disk_del_spdk(diskdev, int(diskid[4:]), diskpool)
                try:
                    pci = self.__spdk2nvme(diskdev)
                    if pci != exists[diskid]['dev']:
                        return False
                    item[1] = pci
                except:
                    # bugfix: 'pci' is unbound here when __spdk2nvme raised;
                    # fall back to the raw device name (mirrors the branch above)
                    item[1] = diskdev
            # bugfix: this branch used to fall through returning None, which
            # made the caller raise 'repeat' even after a successful dedupe
            return True

    def __disk_load_check(self, items):
        """
        Validate a list of (diskid, dev, pool) items before loading.

        :param items: iterable of (diskid, dev, pool); diskid is 'meta',
            'wlog' or 'disk<N>'
        :raises Exp(EINVAL): missing/non-block device, unreconcilable
            duplicated disk id, reused device, out-of-range disk number or
            unsupported disk id
        """
        lich_exists = {}
        lich_disk = []

        for item in items:
            diskid = item[0]
            diskdev = item[1]
            diskpool = item[2]
            if not diskdev:
                raise Exp(errno.EINVAL, '%s not specify' % diskid)
            if not self.disk.is_block(diskdev):
                raise Exp(errno.EINVAL, '%s is not block device' % diskdev)

            if diskid in lich_exists:
                # same disk id seen twice: only tolerated when it is an
                # NVMe/SPDK alias pair that can be reconciled
                if not self.__disk_dedupe_for_nvme(lich_exists, item):
                    raise Exp(errno.EINVAL, '%s %s repeat' % (diskid, diskdev))

            if diskdev in lich_disk:
                raise Exp(errno.EINVAL, '%s repeat used' % diskdev)
            lich_disk.append(diskdev)

            if diskid == 'meta' or diskid == 'wlog':
                pass
            elif diskid.startswith('disk'):
                lich_exists[diskid] = {"id": int(diskid[4:]), "dev": diskdev, "pool": diskpool}
                m = re.match(r'disk(\d+)', diskid)
                if m is not None:
                    if int(m.group(1)) > DISK_MAX_NUM:
                        raise Exp(errno.EINVAL, '%s disk number too big' % diskid)
                else:
                    raise Exp(errno.EINVAL, '%s not support' % diskid)
            else:
                raise Exp(errno.EINVAL, '%s=%s config not support' % (diskid, diskdev))

    def __disk_load_cache(self, lich_items):
        """
        Verify that every disk marked cached in the writeback metadata is
        still present among the scanned system devices.

        :param lich_items: list of (diskid, dev, pool) from the system scan
        :raises Exp(EPERM): a cached disk is missing from the system
        """
        cached_items = self.__get_lich_writeback_fromdata()

        for item in cached_items:
            if not cached_items[item]['cached']:
                continue

            disk = 'disk' + str(item)
            # bugfix: the identical check used to run twice back to back;
            # once is enough
            found = any(i[0] == disk for i in lich_items)
            if not found:
                _syswarn(" disk_manage, %s cached but not found in system" % (disk))
                raise Exp(errno.EPERM, '%s cached but not found in system' % (disk))

    def __disk_load_cleanup(self, items, lich_items):
        """
        Remove lich disk records that no longer correspond to any device
        found on the system.

        :param items: (diskid, dev, pool) tuples found by the system scan
        :param lich_items: (diskid, dev, pool) tuples recorded by lich
        :raises Exp(EPERM): a lich record has an id that is neither
            meta/wlog nor disk<N>
        """
        for item in lich_items:
            found = False
            for i in items:
                if i[0] == item[0]:
                    found = True
                    break
            if found:
                continue

            if item[1] is None:
                continue

            if item[0] == 'meta' or item[0] == 'wlog':
                continue
                # raise Exp(errno.EPERM, '%s not mount to %s' % (item[1], item[0]))
            elif item[0].startswith('disk'):
                disk_num = DiskName(item[0]).num
                if self.lichDisk.is_info_exists(disk_num):
                    continue

                # a freshly created disk may not be visible yet; give it 10 minutes
                now = int(time.time())
                disk_ctime = self.lichDisk.get_file_ctime(disk_num)
                if now - disk_ctime < 600:
                    # bugfix: item[0] already carries the 'disk' prefix, the old
                    # format printed e.g. 'diskdisk3(...)'
                    _dwarn("%s(%s) file create time:%d, now:%d, less than 10m, ignore it !" % (item[0], item[1], disk_ctime, now))
                    continue

                _syswarn(" disk_manage1 cleanup, %s(%s) not found in system" % (item[0], item[1]))
                _dwarn(" cleanup: lich disk %s(%s) not found in system" % (item[0], item[1]))

                disk_path = self.lichDisk.get_path(disk_num, 'disk')
                _exec_system("rm -rf %s" % disk_path)

                self.__disk_del_info(disk_num)
            else:
                raise Exp(errno.EPERM, 'unknow %s(%s)' % (item[0], item[1]))

    def __disk_load_metawlog(self, items):
        """
        Mount the 'meta' device on <home>/data and, when writeback is
        enabled, the 'wlog' device on <home>/data/wlog.

        Already-mounted paths are left untouched; a warning is printed when a
        different device holds the mount point. Mount failures are warned
        about but not raised.
        """
        meta_path = os.path.join(self.config.home, 'data')
        if not os.path.exists(meta_path):
            os.mkdir(meta_path)

        for item in items:
            if item[0] != 'meta':
                continue

            mounted = self.disk.get_dev_bymounted(meta_path)
            if mounted == item[1]:
                continue
            if mounted is not None:
                _dwarn("%s mounted by %s not %s!" % (meta_path, mounted, item[1]))
                continue

            try:
                self.disk.dev_mount(item[1], meta_path)
            except Exp as e:
                tlog.write(ctx='__disk_load_metawlog', msg='error: %s' % e)
                _dwarn(e.err)

        if not self.config.writeback:
            return

        wlog_path = os.path.join(meta_path, 'wlog')
        if not os.path.exists(wlog_path):
            os.mkdir(wlog_path)

        load_wlog = False
        for item in items:
            if item[0] != 'wlog':
                continue

            mounted = self.disk.get_dev_bymounted(wlog_path)
            if mounted == item[1]:
                load_wlog = True
                continue
            if mounted is not None:
                _dwarn("%s mounted by %s not %s!" % (wlog_path, mounted, item[1]))
                continue
            try:
                self.disk.dev_mount(item[1], wlog_path)
                load_wlog = True
            except Exp as e:
                _dwarn(e.err)

        if not load_wlog:
            _dwarn("%s not mount to disk" % wlog_path)

    def __disk_load_disk(self, items, lich_items, old_items):
        """
        Re-add data disks whose (diskid, dev) pairing changed on the system.

        :param items: (diskid, dev, pool) tuples found by the system scan
        :param lich_items: (diskid, dev, pool) tuples recorded by lich
        :param old_items: set of disk ids from the pre-RAID/cache scan;
            those are skipped to avoid re-adding disks whose node just moved
        """
        need_reset = False
        for disk, dev, pool in items:
            if disk.startswith('disk'):
                if dev.startswith('/dev'):
                    if not os.path.exists(dev):
                        # device node vanished since the scan
                        continue

                # already recorded with the same device -> nothing to do
                right = False
                for lich_item in lich_items:
                    if lich_item[0] == disk and lich_item[1] == dev:
                        right = True
                        break

                if right:
                    continue

                # TODO must match with RAID changes
                if old_items and disk in old_items:
                    continue

                # disk still carries writeback cache data: do not touch it
                if self.__get_lich_writeback_cache(dev) != 0:
                    _syswarn(" lich disk %s(%s) not found in lich, but cached, so skip" % (disk, dev))
                    _dwarn(" lich disk %s(%s) not found in lich, but cached, so skip" % (disk, dev))
                    continue

                _syswarn(" disk_manage, %s(%s) not found in lich" % (disk, dev))
                _dwarn(" lich disk %s(%s) not found in lich" % (disk, dev))

                need_reset = True

                # drop the stale record, then re-create it from the on-disk metadata
                disk_num = DiskName(disk).num
                disk_path = self.lichDisk.get_path(disk_num, 'disk')

                _exec_system("rm -rf %s" % disk_path)
                self.__disk_del_info(disk_num)

                if not self.__disk_add_tier_retry(pool, dev, disk_num, retry=3):
                    continue

                try:
                    _dwarn('add link for %s' % dev)
                    self.__disk_add_link(dev, disk_num, pool)
                except:
                    _derror("%s add link failed." % dev)
                    traceback.print_exc()
                    raise

                print

        if need_reset:
            # self.__disk_add_reset()
            pass

    def __disk_load_nvme(self, items, lich_items):
        """
        Re-link lich disks whose NVMe presentation (kernel NVMe vs SPDK) no
        longer matches the configured SPDK mode.
        """
        for disk, dev, pool in lich_items:
            if not disk.startswith('disk'):
                continue

            disk_num = int(disk[4:])

            dev_type = self.disk.get_dev_type(dev)
            if self.config.spdk and dev_type == 'NVMe':
                # SPDK wanted but the link points at a kernel NVMe device
                self.__disk_del_link(disk_num)
                pci = self.__nvme2spdk(dev)
                try:
                    self.__disk_add_link(pci, disk_num, pool)
                except:
                    _derror("%s add link failed." % dev)
                    raise
            elif not self.config.spdk and dev_type == 'SPDK':
                # kernel NVMe wanted but the disk is registered through SPDK
                self.__disk_del_link(disk_num)
                self.__disk_del_spdk(dev, disk_num, pool)
                pci = self.__spdk2nvme(dev.split('_')[-1])
                try:
                    self.__disk_add_link(pci, disk_num, pool)
                except:
                    _derror("%s add link failed." % dev)
                    raise

    def __disk_load_loading(self, items, verbose=False, old_items=None):
        """
        Apply the scanned device list to the on-disk lich state: cache sanity
        check, stale-record cleanup, re-add of renamed disks and NVMe/SPDK
        link fixup.
        """
        if not os.path.exists(self.disk_home):
            os.makedirs(self.disk_home)

        lich_items = self.__get_lich_disk(need_convert=False)

        tlog.write(ctx='__disk_load_loading', msg='items %s' % items)
        tlog.write(ctx='__disk_load_loading', msg='lichs %s' % lich_items)

        # cached disk not found in system -> abort
        self.__disk_load_cache(lich_items)

        # lich disk not found in system -> clean up stale records
        self.__disk_load_cleanup(items, lich_items)

        # mount meta disk (handled separately by ensure_meta)
        # self.__disk_load_metawlog(items)

        # diskname changed -> add disk again
        self.__disk_load_disk(items, lich_items, old_items)

        # nvme type changed -> reset disk link
        self.__disk_load_nvme(items, lich_items)

    def ensure_meta(self):
        """
        Find the first partition carrying 'meta' metadata and mount it.

        Devices without partitions are probed directly; the search stops at
        the first match.
        """
        all_parts = self.disk.get_all_parts()

        tlog.write(ctx='ensure_meta', msg='all_parts %s' % all_parts)

        for dev, parts in all_parts.iteritems():
            if not parts:
                # no partitions: probe the whole device
                parts.append(dev)

            for part in parts:
                match = self.__disk_load_valid(part, meta_only=True)
                if match and match[0] == 'meta':
                    tlog.write(ctx='ensure_meta', msg='dev %s match %s' % (part, match))
                    self.__disk_load_metawlog([match])
                    return

    def __fix_nvme(self):
        """
        Delete stale SPDK marker files under the nvme prefix.

        The pool list is sampled three times; if it is unstable the cleanup
        is skipped so markers of a pool in flux are not removed. Best effort:
        any error is only logged.
        """
        try:
            sample1 = self.pool_manage.pool_list()
            time.sleep(0.2)
            sample2 = self.pool_manage.pool_list()
            time.sleep(0.2)
            sample3 = self.pool_manage.pool_list()

            if not list_equal(sample1, sample2) or not list_equal(sample1, sample3):
                tlog.write(ctx='__fix_nvme',
                           msg='%s != %s or %s != %s' % (sample1, sample2,
                                                         sample1, sample3))
                return

            tlog.write(ctx='__fix_nvme', msg='pools=%s' % sample1)

            prefix = self.lichDisk.nvme_prefix()
            if not os.path.exists(prefix):
                return

            for entry in os.listdir(prefix):
                fields = entry.split('_')
                # a valid marker is pci_<pool>_<disk>_<pci-addr> whose pool exists
                stale = (not entry.startswith('pci_') or len(fields) != 4
                         or fields[1] not in sample1)
                if stale:
                    fpath = os.path.join(prefix, entry)
                    tlog.write(ctx='__fix_nvme', msg='unlink %s' % fpath)
                    os.unlink(fpath)
        except Exception as e:
            # traceback.print_exc()
            tlog.write(ctx='__fix_nvme', msg='%s' % e)

    def __disk_load_auto(self, verbose=False, load_diff=False):
        """
        Disk Management:
          1. RAID
          2. bcache
          3. lichd

        Auto-detect and load all lich disks: fix stale SPDK markers, mount
        the meta partition, recover RAID/bcache, then validate and apply the
        resulting device list.

        :todo if enter many times, need lock protect
        :todo bugfix #11590

        :param verbose: print timing/diagnostic messages
        :param load_diff: pass the pre-recovery disk ids to the loader so it
            can skip disks whose device node merely moved
        :return:

        """
        begin_time = time.time()

        tlog.write(ctx='__disk_load_auto')

        self.__fix_nvme()

        self.ensure_meta()

        # snapshot of lich devices before RAID/cache recovery
        old_disks = self.__disk_load_search()

        if not self.raid:
            self.raid = RAID(self.config)

        # tracks whether RAID/cache recovery changed anything visible
        found = False

        if self.raid and self.raid.has_raid():
            # _dmsg("check RAID ...")

            found1 = self.raid_miss()
            found2 = self.raid_load()
            if found1 or found2:
                # RAID topology changed; refresh controller state, rescan below
                self.raid.refresh()
                found = True
                if verbose:
                    _dmsg('raid_miss %s raid_load %s' % (found1, found2))

            time.sleep(1)

        if self.config.cache_enable:
            # _dmsg("check cache ...")

            found3 = self.cache_attach()
            if found3:
                found = True
                if verbose:
                    _dmsg('cache_attach %s' % (found3))

        # only rescan when recovery actually changed something
        if found:
            new_disks = self.__disk_load_search()
        else:
            new_disks = old_disks

        if load_diff:
            old_items = set([disk for disk, _dev, _pool in old_disks])
            if len(new_disks) != len(old_items):
                _dmsg('%d %d %s %s' % (len(old_items), len(new_disks), old_items, new_disks))
        else:
            old_items = set()

        self.__disk_load_check(new_disks)

        self.__disk_load_loading(new_disks, verbose, old_items=old_items)

        if verbose:
            end_time = time.time()
            _dmsg("disk load used time: %.2f s" % (end_time - begin_time))

    def __disk_load_conf(self, conf):
        """
        Load the disk layout from an ini file's [disk] section and apply it.

        NOTE(review): ConfigParser.items() yields (name, value) 2-tuples while
        the check/load path also reads item[2] -- confirm the expected section
        format before relying on this entry point.
        """
        if not os.path.exists(conf):
            raise Exp(errno.EINVAL, '%s not exists' % conf)

        parser = ConfigParser.ConfigParser()
        parser.read(conf)
        entries = parser.items("disk")

        self.__disk_load_check(entries)
        self.__disk_load_loading(entries)

    def __disk_load_args(self, args):
        """
        Parse 'meta=DEV' / 'wlog=DEV' / 'diskN=DEV' command line arguments,
        validate them and apply the layout.
        """
        for arg in args:
            if '=' not in arg:
                raise Exp(errno.EINVAL, '%s is invalid argument, use --help for help' % arg)
            # accepted key prefixes; anything else is rejected
            if not arg.startswith(('meta=', 'wlog=', 'disk')):
                raise Exp(errno.EINVAL, '%s is invalid argument, use --help for help' % arg)

        items = [tuple(arg.split('=')) for arg in args]

        self.__disk_load_check(items)
        self.__disk_load_loading(items)

    def disk_meta(self, args=None, verbose=False):
        """ get all usable lich devices.

        Split the scanned devices into those carrying lich data metadata and
        those without.

        :param args: unused
        :param verbose: unused
        :return: (data_devs, unused_devs) dicts keyed by device path; data
            devices map to their parsed meta, unused ones to the scan entry
        """
        data_devs = {}
        unused_devs = {}

        for dev, info in self.__get_all_disk(is_all=False).iteritems():
            # TODO because dirty data, read from /dev/sda and /dev/bcache0 maybe different
            # use dd to check:
            #   /dev/bcache0:        1204 + 588 = 1612 (true)
            #   /dev/sda:     8192 + 1204 + 588 = 9804 (stale)
            # so prefer reading through the bcache mapping device when present
            read_dev = self.commoncache.get_mappingdev_by_coredev(dev) or dev

            meta, _buff = self.bdev.read_meta(read_dev, match_cluster_and_node=True, bcache_header=False)
            if meta:
                data_devs[dev] = meta
                # self.bdev.update_meta(dev)
            else:
                unused_devs[dev] = info

        return data_devs, unused_devs

    def disk_show(self, args=None, verbose=False):
        """
        :todo check RAID status
        :todo check block and info file
        :todo check bitmap file

        :param args:
        :param verbose:
        :return:
        """
        all_attach_devs = set()

        csets = self.bdev.cset_list(verbose=True)

        print '==summary:'
        print '  cache: %s' % 'on' if self.config.cache_enable else 'off'
        print '  cache_type: %s' % self.config.cache_type
        # print '  cache_mode: %s' % self.config.cache_mode

        print
        print '==cset:'
        for uuid, bdevs in csets.iteritems():
            msg = '  %s %d %-4s %s' % (uuid, len(bdevs), self.bdev.cache_devcie_by_cset(uuid), bdevs)
            if len(bdevs):
                _dmsg(msg)
            else:
                _derror(msg)

            for dev in bdevs:
                path = '/sys/block/%s/bcache' % dev
                if not os.path.isdir(path):
                    _derror('    %s is invalid dir' % path)

            all_attach_devs = all_attach_devs.union(bdevs)

        print
        print '==disks:'
        data_devs, unused_devs = self.disk_meta()

        count = 0
        for dev, meta in sorted(data_devs.items(), key=lambda x: x[0], reverse=False):
            # TODO because dirty data, read from /dev/sda and /dev/bcache0 maybe different
            # use dd to check:
            #   /dev/bcache0:        1204 + 588 = 1612 (true)
            #   /dev/sda:     8192 + 1204 + 588 = 9804 (stale)

            mapping_dev = self.commoncache.get_mappingdev_by_coredev(dev)

            register = 1 if self.bdev.is_registered(dev) else 0
            attach = 1 if DevName(dev).name in all_attach_devs else 0

            msg = '%4d %-10s %-14s reg %s att %s %s' % (count, dev, mapping_dev, register, attach, meta)

            if not register or not attach or meta['type'] != 'data' or meta['cset'] == '':
                _derror(msg)
            else:
                _dmsg(msg)

            count += 1

        print
        print '==Non-data disks:'
        for dev, v in sorted(unused_devs.items(), key=lambda x: x[0], reverse=False):
            _dmsg('  %s %s' % (dev, v))

        print
        print '==lich disks:'

        files = ['disk', 'block', 'info', 'bitmap', 'tier', 'speed', 'rotation']

        d = {}

        disk_num = self.lichDisk.get_max_disk_num() + 1
        for i in range(disk_num):
            d[i] = {}

            meta = None

            buf = StringIO()
            buf.write('disk %4d' % i)

            # symlink
            disk_path = self.lichDisk.get_path(i, 'disk')
            if os.path.islink(disk_path):
                dev = os.path.realpath(disk_path)
                if dev.startswith('/dev'):
                    buf.write(' %-20s' % dev)

                    meta, buff = self.bdev.read_meta(dev)
                else:
                    buf.write(' %-20s' % '')

                # backing device
                bdev = self.bdev.backing_device_by_bcache(dev)
                if bdev:
                    buf.write(' %-8s' % bdev)
                else:
                    buf.write(' %-8s' % '')
            else:
                buf.write(' %-20s' % '')
                buf.write(' %-8s' % '')

            for t in files:
                d[i][t] = 1 if os.path.exists(self.lichDisk.get_path(i, t)) else 0
                buf.write(' %s %d' % (t, d[i][t]))

            if meta:
                buf.write(" - meta %-4s disk %d" % (meta['type'], meta['disk']))
            else:
                buf.write(" - meta %-6s" % '')

            if d[i]['disk']:
                _dmsg(buf.getvalue())
            else:
                _derror(buf.getvalue())

        # pprint.pprint(d)
        return d

    def cache_attach(self, args=None, verbose=False):
        """
        Attach data backing devices to their recorded cache sets.

        :param args: unused
        :param verbose: unused
        :return: list of devices that were newly attached (i.e. changed)
        """
        attach_devs = []

        self.bdev.disk_relation_recover()
        csets = self.bdev.cset_list()
        if not csets:
            return attach_devs

        devs, _ = self.disk_meta(verbose=False)

        for uuid, bdevs in csets.iteritems():
            # _dmsg('cset_uuid %s %s' % (uuid, bdevs))

            # attach every backing device that records this cset uuid
            for dev, meta in devs.iteritems():
                if meta['type'] == 'meta':
                    continue
                if uuid != meta['cset']:
                    continue
                if DevName(dev).name in bdevs:
                    # already attached to this cache set
                    continue

                try:
                    self.commoncache.check_and_register(dev)
                except Exp as e:
                    _dwarn(e.err)

                if not self.commoncache.get_mappingdev_by_coredev(dev):
                    continue

                try:
                    self.commoncache.attach_device(uuid, dev)

                    # verify the attach really happened before reporting it
                    if DevName(dev).name in self.bdev.backing_device_by_cset(uuid):
                        attach_devs.append(dev)
                except Exp as e:
                    _dwarn(e.err)
                    # traceback.print_exc()

        return attach_devs

    def disk_load(self, args=None, verbose=False, load_diff=False):
        """
        Entry point: dispatch to auto scan, conf-file load or explicit args.
        """
        tlog.write(ctx='disk_load', msg='args %s verbose %s load_diff %s config.testing: %s' % (
            args, verbose, load_diff, self.config.testing))

        # test mode never touches real devices
        if self.config.testing:
            return

        if not args:
            return self.__disk_load_auto(verbose=verbose, load_diff=load_diff)
        if len(args) == 1 and args[0].startswith('conf='):
            return self.__disk_load_conf(args[0].split('=')[-1])
        return self.__disk_load_args(args)

    def __disk_check_env(self):
        """
        Make sure the on-disk directory layout (disk home, tier, speed,
        rotation) exists.
        """
        for path in (self.disk_home, self.tier_path,
                     self.speed_path, self.rotation_path):
            if not os.path.exists(path):
                os.makedirs(path)

        # historical pool/sgdisk validation, disabled on purpose:
        '''
        try:
            pool_list = self.pool_manage.pool_list()
        except:
            return

        all_pool = self.__get_all_pool()
        for p in all_pool:
            if p not in pool_list:
                self.pool_manage.pool_cleanup(p)

        err_info = ''
        try:
            (out_msg, err_msg) = _exec_pipe1(["sgdisk"], 0, False)
        except Exp, e:
            if not e.err.startswith('Usage:'):
                err_info += e.err + '\n'

        if not os.path.exists(self.disk_home):
                err_info += '%s not exists\n' % self.disk_home

        if err_info != '':
            raise Exp(errno.EPERM, err_info.strip())
        '''

    def __get_all_disk(self, is_all):
        all_disk = {}
        all_devs = []
        sys_dev = []
        nvme_dev = []

        try:
            all_devs = self.disk.get_all_devs()

            sys_dev = self.disk.get_sys_dev(True)

            spdk_dev = self.disk.get_all_spdk()
        except:
            pass

        for dev in all_devs:
            try:
                dev_type = self.disk.get_dev_type(dev)
            except:
                continue

            if dev_type == "ISCSI":
                continue
            elif dev_type == "NVMe":
                pci = self.disk.get_dev_pci(dev)
                nvme_dev.append(pci)
            elif dev_type == "UNKNOW" and not is_all:
                continue

            if self.disk.is_swap(dev):
                continue
            if dev in sys_dev and not is_all:
                continue

            blk = LSBLK(dev)
            if blk.dev_type() in ['rom']:
                continue

            all_disk[dev] = {}

            dev_parts = self.disk.get_dev_parts(dev)
            if len(dev_parts) != 0:
                all_disk[dev]['part_info'] = {}

            for part in dev_parts:
                if part in sys_dev:
                    continue
                try:
                    all_disk[dev]['part_info'][part] = {}
                except Exception, e:
                    pass

        for dev in spdk_dev:
            if dev in nvme_dev:
                continue
            all_disk[dev] = {}

        return all_disk

    def __update_media_type_etc(self, target, dev_info, raid_info, disk_info, dev):
        """
        Fill media type (and temperatures) into *target* for RAID devices.

        With raid_info available the values come from the RAID controller's
        member-disk info; otherwise (e.g. JBOD, where the reported media type
        stays 'RAID') the media type is inferred from the kernel's rotational
        flag.
        """
        if dev_info['type'] != 'RAID':
            return

        if raid_info is not None:
            target['raid_info'] = raid_info
            target['dev_info']['media_type'] = disk_info['media_type']
            target['dev_info']['interface'] = disk_info['interface']
            target['dev_info']['curr_temp'] = disk_info['curr_temp']
            target['dev_info']['max_temp'] = disk_info['max_temp']
            return

        # TODO JBOD
        if target['dev_info']['media_type'] != 'RAID':
            return

        dev_name = dev[5:] if dev.startswith("/dev/") else dev
        dev_type = 'UNKNOWN'

        rotational = "/sys/block/" + dev_name + "/queue/rotational"
        if os.path.exists(rotational):
            # read the sysfs flag directly instead of shelling out to cat
            with open(rotational) as f:
                type_num = f.read().strip()
            if type_num == '0':
                dev_type = 'SSD'
            elif type_num == '1':
                dev_type = 'HDD'

        target['dev_info']['media_type'] = dev_type

    def __disk_list_getall(self, is_all=False):
        """
        :param is_all:
        :return:
            {'/dev/sda': {
                'type': 'cache' | 'disk%',
                'flag': 'sys' | 'lich' | 'new',
                'mode': 'dev',
                'dev_info': {},     // bcache
                'raid_info': {},
                }
            }
        """

        # print '000', time.time()

        if not self.raid:
            self.raid = RAID(self.config)

        all_disk_json = {}

        # print '111', time.time()

        lich_items = self.__get_lich_disk()
        lich_items_old = self.__get_lich_disk(False)
        lich_disk = [dev for (x, dev, pool) in lich_items if dev]
        lich_disk_old = [dev for (x, dev, pool) in lich_items_old if dev]
        lich_nvme = self.__get_lich_nvme(lich_items)

        # print '222', time.time()

        lich_dev = self.__get_lich_dev()

        # print '333', time.time()

        disk_stat = self.__get_disk_stat(lich_items)

        # print '444', time.time()

        all_disk = self.__get_all_disk(is_all)

        # print '555', time.time()

        sys_dev = []
        try:
            sys_dev = self.disk.get_sys_dev()
        except:
            pass

        # print '666', time.time()
        # return {}

        # pprint.pprint(lich_items)
        # pprint.pprint(lich_disk)
        # pprint.pprint(lich_dev)
        # pprint.pprint(all_disk)

        raid_info = None
        disk_info = None

        for dev in all_disk:
            dev_info = self.disk.get_dev_info(dev)
            if dev_info['type'] == 'RAID':
                try:
                    raid_info = self.raid.raid_info(dev)
                    if raid_info is not None:
                        disk_info = self.raid.disk_info(raid_info['disk'][0])
                        # print 'disk_info', dev, raid_info['disk'][0], disk_info
                        if 'curr_temp' not in disk_info:
                            disk_info['curr_temp'] = '0'
                        if 'max_temp' not in disk_info:
                            disk_info['max_temp'] = '0'
                except Exception, e:
                    # traceback.print_exc()
                    # TODO disk_info not updated
                    raid_info = None
                    disk_info = None

            if self.config.cache_enable:
                if self.commoncache.is_running_coredev(dev):
                    cacheid, cachedev, cache_status, cache_mode = self.commoncache.get_cacheinfo_by_coredev(dev)
                    all_disk_json[dev] = all_disk[dev]
                    all_disk_json[dev]['dev_info'] = dev_info
                    all_disk_json[dev]['dev_info']['cache'] = str(cachedev)
                    all_disk_json[dev]['dev_info']['cache_mode'] = cache_mode
                    all_disk_json[dev]['dev_info']['status'] = cache_status

            # print dev, dev_info, raid_info, disk_info

            if dev in lich_disk:
                all_disk_json[dev] = all_disk[dev]
                all_disk_json[dev]['flag'] = 'lich'
                all_disk_json[dev]['mode'] = 'dev'
                all_disk_json[dev]['dev_info'] = dev_info

                self.__update_media_type_etc(all_disk_json[dev], dev_info, raid_info, disk_info, dev)

                for (item_type, item_dev, item_pool) in lich_items:
                    if item_dev == dev:
                        all_disk_json[dev]['type'] = item_type
                        all_disk_json[dev]['pool'] = item_pool
                        if item_type in disk_stat:
                            all_disk_json[dev]['disk_stat'] = disk_stat[item_type]
                continue

            if dev in lich_disk_old:
                continue

            if 'part_info' in all_disk[dev].keys():
                for part in all_disk[dev]['part_info']:
                    if part in lich_disk:
                        all_disk_json[part] = {}
                        all_disk_json[part]['dev_info'] = self.disk.get_part_info(part)
                        all_disk_json[part]['flag'] = 'lich'
                        all_disk_json[part]['mode'] = 'part'
                        for (item_type, item_dev, item_pool) in lich_items:
                            if item_dev == part:
                                all_disk_json[part]['type'] = item_type
                                all_disk_json[part]['pool'] = item_pool
                                if item_type in disk_stat:
                                    all_disk_json[part]['disk_stat'] = disk_stat[item_type]
                        all_disk_json[part]['dev_info']['cache'] = dev_info['cache']

                        self.__update_media_type_etc(all_disk_json[part], dev_info, raid_info, disk_info, dev)

            if dev in sys_dev:
                if not is_all:
                    continue

                all_disk_json[dev] = all_disk[dev]
                all_disk_json[dev]['dev_info'] = dev_info
                all_disk_json[dev]['flag'] = 'sys'
                all_disk_json[dev]['mode'] = 'dev'

                if 'part_info' in all_disk[dev].keys():
                    for part in all_disk[dev]['part_info']:
                        all_disk_json[dev]['part_info'][part] = self.disk.get_part_info(part)

                self.__update_media_type_etc(all_disk_json[dev], dev_info, raid_info, disk_info, dev)
                continue
            elif dev in lich_dev:
                continue
            elif dev in lich_nvme:
                continue
            else:
                all_disk_json[dev] = all_disk[dev]
                all_disk_json[dev]['dev_info'] = dev_info

                if self.config.cache_enable:
                    if self.commoncache.is_running_cachedev(dev):
                        cacheid, cache_status, cache_mode = self.commoncache.get_cacheinfo_by_cachedev(dev)
                        all_disk_json[dev]['flag'] = 'lich'

                        all_disk_json[dev]['type'] = 'cache'
                        all_disk_json[dev]['cacheid'] = cacheid
                        all_disk_json[dev]['cache_status'] = cache_status
                        all_disk_json[dev]['cache_mode'] = cache_mode
                        all_disk_json[dev]['pool'] = 'None'

                        coredevs = self.commoncache.list_coredevs_by_cachedev(dev)
                        for coredev in coredevs:
                            for (item_type, item_dev, item_pool) in lich_items:
                                if item_dev == coredev:
                                    all_disk_json[dev]['pool'] = item_pool
                                    break

                            if all_disk_json[dev]['pool'] != 'None':
                                break

                        # TODO need check /sys/fs/bcache/<cset-uuid>

                    else:
                        all_disk_json[dev]['flag'] = 'new'
                        if self.disk.dev_check_mounted(dev) or self.disk.is_part(dev):
                            all_disk_json.pop(dev)
                            continue
                else:
                    all_disk_json[dev]['flag'] = 'new'
                    if self.disk.dev_check_mounted(dev) or self.disk.is_part(dev):
                        all_disk_json.pop(dev)
                        continue

                all_disk_json[dev]['mode'] = 'dev'
                if 'part_info' in all_disk[dev].keys():
                    all_disk_json.pop(dev)
                    continue

                self.__update_media_type_etc(all_disk_json[dev], dev_info, raid_info, disk_info, dev)

        new_raid_disk = self.raid.disk_list()
        if new_raid_disk:
            for adp, disks in new_raid_disk.iteritems():
                for disk in disks:
                    assert disk == disks[disk]['inq']

                    # skip JBOD
                    if disks[disk]['stat'] == 'JBOD':
                        continue

                    all_disk_json[disk] = {}
                    all_disk_json[disk]['flag'] = 'new'
                    all_disk_json[disk]['mode'] = 'disk'
                    all_disk_json[disk]['raid_info'] = disks[disk]

        # cache disk add disk_stat for UMP
        cache_num = 0
        for dev, v in all_disk_json.iteritems():
            if 'type' not in v:
                cache_num = 0
                break

            if v['type'] == 'cache':
                cache_num += 1

        if cache_num > 0:
            for dev, v in all_disk_json.iteritems():
                if v['type'] == 'cache':
                    v['dirty_data'] = 0

            for dev, v in all_disk_json.iteritems():
                if v['type'] == 'cache' or v['type'] == 'meta' or v['type'] == 'wlog':
                    continue

                if 'cache' in v['dev_info']:
                    cachedev = v['dev_info']['cache']
                    #if cachedev != 'Enabled' and cachedev != 'Disabled':
                    if cachedev:
                        sdName = os.path.split(dev)[-1]
                        all_disk_json[cachedev]['dirty_data'] += Bcache.dirty_data(sdName)

            for dev, v in all_disk_json.iteritems():
                if v['type'] == 'cache':
                    all_disk_json[dev]['disk_stat'] = Bcache.disk_stat(v['dev_info']['size'], v['dirty_data'])
                    del v['dirty_data']

        return all_disk_json

    def __disk_list_getusable(self, all_disk, force):
        """Collect the devices flagged 'new' that lich may take over.

        all_disk -- {dev: info} inventory as built by __disk_list_getall().
        force    -- when True, skip the safety checks (mounted, existing
                    partitions, non-RAID0 level, missing cache support).

        Returns a list of device names.
        """
        usable = []
        for name in all_disk:
            entry = all_disk[name]
            if entry['flag'] != 'new':
                continue
            # Raw RAID member disks ('mode' == 'disk') are never usable here.
            if entry['mode'] == 'disk':
                continue

            # Probes below run unconditionally (matching original call
            # pattern); only the skip decision honours `force`.
            mounted = self.disk.dev_check_mounted(name)
            if mounted and not force:
                continue
            if 'part_info' in entry and not force:
                continue
            if len(self.disk.get_dev_parts(name)) and not force:
                continue

            dev_info = entry['dev_info']
            if dev_info['type'] == 'RAID':
                # Only RAID0 volumes are considered safe without force.
                if 'raid_info' in entry:
                    if entry['raid_info']['raid'] != '0' and not force:
                        continue
            elif dev_info['cache'] is None and not force:
                continue

            usable.append(name)
        return usable

    def __show_sys_raid(self, sys_raid):
        """Print system-owned RAID devices using the unused-RAID layout.

        The empty usable_disk map suppresses the '(in)/(out)' markers.
        """
        self.__show_unused_raid(sys_raid, {})

    def __show_sys_dev(self, sys_dev):
        """Print system-owned plain devices using the unused-device layout.

        The empty usable_disk map suppresses the '(usable)' marker.
        """
        self.__show_unused_dev(sys_dev, {})

    def __show_used_raid(self, used_raid, used_dev):
        """Print the lich-used devices that sit behind RAID adapters.

        used_raid -- {adapter_id: {'adp_name': str, 'adp_dev': {dev: info}}}
        used_dev  -- flat {dev: info} map; consulted for bcache fields only
                     when self.config.cache_enable is set.

        One section per adapter; whole devices ('mode' == 'dev') and
        partitions get different output formats.  Prints to stdout,
        returns None.
        """
        for adp in used_raid:
            print("RAID Adapter #%s:%s" % (adp, used_raid[adp]['adp_name']))
            for dev, dev_info in used_raid[adp]['adp_dev'].iteritems():
                if dev_info['mode'] == 'dev':
                    # Whole-device entry.
                    lich_type = dev_info['type']
                    lich_pool = dev_info['pool']
                    raid_type = dev_info['dev_info']['type'] + dev_info['raid_info']['raid']
                    media_type = dev_info['dev_info']['media_type']
                    interface = dev_info['dev_info']['interface']
                    size = dev_info['dev_info']['size']
                    if 'disk_stat' in dev_info:
                        # disk_stat counters appear to be in MiB (scaled by
                        # 1024*1024 before humanizing) — free space plus free
                        # writeback space.
                        free = (int(dev_info['disk_stat']['total']) -
                                int(dev_info['disk_stat']['used'])) * (1024*1024)
                        free += (int(dev_info['disk_stat']['wbtotal']) -
                                int(dev_info['disk_stat']['wbused'])) * (1024*1024)
                        free = _human_readable(free)
                    else:
                        free = dev_info['dev_info']['free']

                    # NOTE(review): mount is read but never used below.
                    mount = dev_info['dev_info']['mount']

                    # smart_path (when enabled) replaces the raid/disk cache
                    # summary entirely.
                    cache = ''
                    if 'smart_path' in dev_info['raid_info']:
                        if dev_info['raid_info']['smart_path'] == 'enable':
                            cache = 'cache:smart_path'

                    if cache == '':
                        cache = ' raid_cache:%-12s' % (dev_info['raid_info']['raid_cache'])
                        cache += ' disk_cache:' + dev_info['raid_info']['disk_cache']
                        if self.config.cache_enable:
                            # bcache status comes from the flat used_dev map.
                            cache_tmp = used_dev[dev]['dev_info']['cache']
                            if cache_tmp is None:
                                cache += ' cache:Notsupport'
                            else:
                                cache_mode = used_dev[dev]['dev_info']['cache_mode']
                                cache_status = used_dev[dev]['dev_info']['status']
                                cache += ' cachedev:' + cache_tmp
                                cache += ' status:' + cache_status
                                cache += ' mode:' + cache_mode

                    label = dev_info['dev_info']['label']
                    info = ''
                    if lich_type.startswith('disk'):
                        info += 'tier:' + dev_info['tier']
                        if 'disk_stat' in dev_info:
                            if dev_info['disk_stat']['cache'] != '0':
                                info += ' writeback:' + dev_info['dev_info']['size']
                                info += ',Enabled '
                                '''
                                info += ' writeback:' + _human_readable(int(used_raid[adp]['adp_dev'][dev]['disk_stat']['wbtotal']) * (1024*1024))
                                if used_raid[adp]['adp_dev'][dev]['disk_stat']['cached'] == '1':
                                    info += ',Enabled'
                                else:
                                    info += ',Disabled'
                                '''
                        else:
                            if dev_info['writeback']['cache'] != '0':
                                info += ' writeback:' + dev_info['dev_info']['size']
                                info += ',Enabled '
                                '''
                                info += ' writeback:' + _human_readable(_human_unreadable(size) * \
                                        int(used_raid[adp]['adp_dev'][dev]['writeback']['cache']) / 100)
                                if used_raid[adp]['adp_dev'][dev]['writeback']['cached'] == 1:
                                    info += ',Enabled'
                                else:
                                    info += ',Disabled'
                                '''
                    if label and label != '':
                        info += ' label:' + label + ' '
                    info += ' ' + cache

                    if self.config.cache_enable:
                        # Extra column: the bcache mapping device, or N/A.
                        mapping_dev = self.commoncache.get_mappingdev_by_coredev(dev)
                        if not mapping_dev:
                            mapping_dev = 'N/A'

                        print("  %-6s %-9s %-13s %-9s %-9s %-4s %-4s %-9s free:%-8s %s" %
                              (lich_type, dev, mapping_dev, lich_pool, raid_type, media_type, interface, size, free, info))
                    else:
                        print("  %-6s %-9s %-9s %-9s %-4s %-4s %-9s free:%-8s %s" %
                              (lich_type, dev, lich_pool, raid_type, media_type, interface, size, free, info))
                else:
                    # Partition entry on a RAID device.
                    lich_type = dev_info['type']
                    lich_pool = dev_info['pool']
                    media_type = dev_info['dev_info']['media_type']
                    interface = dev_info['dev_info']['interface']
                    size = dev_info['dev_info']['size']
                    fs = dev_info['dev_info']['fs']
                    # NOTE(review): mount is read but never used below.
                    mount = dev_info['dev_info']['mount']
                    info = ''
                    if lich_type.startswith('disk'):
                        info += 'tier:' + dev_info['tier']
                        if 'disk_stat' in dev_info:
                            if dev_info['disk_stat']['cache'] != '0':
                                info += ' writeback:' + _human_readable(int(dev_info['disk_stat']['wbtotal']) * (1024*1024))
                                if dev_info['disk_stat']['cached'] == '1':
                                    info += ',Enabled'
                                else:
                                    info += ',Disabled'
                        else:
                            # Writeback size derived as a percentage of the
                            # device size.
                            if dev_info['writeback']['cache'] != '0':
                                info += ' writeback:' + _human_readable(_human_unreadable(size) * \
                                        int(dev_info['writeback']['cache']) / 100)
                                if dev_info['writeback']['cached'] == 1:
                                    info += ',Enabled'
                                else:
                                    info += ',Disabled'

                    print("  %-6s %-6s %-9s %-4s %-4s Partition %-9s %-13s %s" %
                          (lich_type, dev, lich_pool, media_type, interface, size, fs, info))

    def __show_used_dev(self, used_dev):
        """Print the lich-used devices that are NOT behind a RAID adapter.

        used_dev -- flat {dev: info} map.

        Three shapes of entry: whole data/cache device ('mode' == 'dev',
        type 'disk*' or 'cache*') and partitions.  Prints to stdout,
        returns None.
        """
        for dev in used_dev:
            if used_dev[dev]['mode'] == 'dev':
                lich_type = used_dev[dev]['type']
                disk_type = used_dev[dev]['dev_info']['type']
                size = used_dev[dev]['dev_info']['size']

                if 'disk_stat' in used_dev[dev]:
                    # disk_stat counters appear to be in MiB (scaled by
                    # 1024*1024 before humanizing).
                    free = (int(used_dev[dev]['disk_stat']['total']) -
                            int(used_dev[dev]['disk_stat']['used'])) * (1024*1024)
                    free += (int(used_dev[dev]['disk_stat']['wbtotal']) -
                             int(used_dev[dev]['disk_stat']['wbused'])) * (1024*1024)
                    free = _human_readable(free)
                else:
                    free = used_dev[dev]['dev_info']['free']

                # NOTE(review): mount is read but never used below.
                mount = used_dev[dev]['dev_info']['mount']
                cache = used_dev[dev]['dev_info']['cache']
                label = used_dev[dev]['dev_info']['label']
                lich_pool = used_dev[dev]['pool']
                info = ''
                if lich_type.startswith('disk'):
                    # Data disk: report tier, writeback, and bcache state.
                    if self.commoncache.is_running_coredev(dev):
                        status = used_dev[dev]['dev_info']['status']
                        cache_mode = used_dev[dev]['dev_info']['cache_mode']
                    else:
                        status = None
                        cache_mode = None
                    info += 'tier:' + used_dev[dev]['tier']

                    if 'disk_stat' in used_dev[dev]:
                        if used_dev[dev]['disk_stat']['cache'] != '0':
                            info += ' writeback:' + used_dev[dev]['dev_info']['size']
                            info += ',Enabled'
                            '''
                            if used_dev[dev]['disk_stat']['cached'] == '1':
                                info += ',Enabled'
                            else:
                                info += ',Disabled'
                            '''
                    else:
                        if used_dev[dev]['writeback']['cache'] != '0':
                            info += ' writeback:' + used_dev[dev]['dev_info']['size']
                            info += ',Enabled'
                        '''
                            info += ' writeback:' + _human_readable(_human_unreadable(size) * \
                                    int(used_dev[dev]['writeback']['cache']) / 100)
                            if used_dev[dev]['writeback']['cached'] == 1:
                                info += ',Enabled'
                            else:
                                info += ',Disabled'
                        '''

                    if cache is None:
                        info += ' cache:Notsupport'
                    else:
                        info += ' cachedev:' + cache
                    if status is None:
                        info += ' status:None'
                    else:
                        info += ' status:' + status
                    if cache_mode is None:
                        info += ' cache_mode:None'
                    else:
                        info += ' cache_mode:' + cache_mode
                    if label is not None and label != '':
                        info += ' label:' + label

                    print("  %-6s %-15s %-9s %-9s %-9s free:%-8s %s" % (lich_type, dev, lich_pool, disk_type, size, free, info))
                elif lich_type.startswith('cache'):
                    # Cache device: online iff its cset uuid is registered
                    # under /sys/fs/bcache.
                    cset_uuid = used_dev[dev]['cacheid']
                    if os.path.isdir(os.path.join('/sys/fs/bcache', cset_uuid)):
                        status = 'online'
                    else:
                        status = 'offline'

                    print("  %-6s %-15s %-9s %-9s %-9s %32s [%s]" % (lich_type, dev, lich_pool, disk_type, size, cset_uuid, status))
            else:
                # Partition entry.
                lich_type = used_dev[dev]['type']
                lich_pool = used_dev[dev]['pool']
                size = used_dev[dev]['dev_info']['size']
                fs = used_dev[dev]['dev_info']['fs']
                # NOTE(review): mount is read but never used below.
                mount = used_dev[dev]['dev_info']['mount']
                info = ''
                dev_type = self.disk.get_dev_type(self.disk.get_dev(dev))
                if lich_type.startswith('disk'):
                    info += 'tier:' + used_dev[dev]['tier']
                    if 'disk_stat' in used_dev[dev]:
                        if used_dev[dev]['disk_stat']['cache'] != '0':
                            info += ' writeback:' + _human_readable(int(used_dev[dev]['disk_stat']['wbtotal']) * (1024*1024))
                            if used_dev[dev]['disk_stat']['cached'] == '1':
                                info += ',Enabled'
                            else:
                                info += ',Disabled'
                    else:
                        # Writeback size as a percentage of device size.
                        if used_dev[dev]['writeback']['cache'] != '0':
                            info += ' writeback:' + _human_readable(_human_unreadable(size) * \
                                    int(used_dev[dev]['writeback']['cache']) / 100)
                            if used_dev[dev]['writeback']['cached'] == 1:
                                info += ',Enabled'
                            else:
                                info += ',Disabled'

                print("  %-6s %-6s %-9s %s(Part) %-9s %-13s %s" %(lich_type, dev, lich_pool, dev_type, size, fs, info))

    def __show_unused_raid(self, unused_raid, usable_disk):
        """Print RAID-backed devices not (yet) used by lich.

        unused_raid -- {adapter_id: {'adp_name': str, 'adp_dev': {dev: info}}}
        usable_disk -- devices deemed takeable by __disk_list_getusable();
                       matching entries get an '(in)'/'(out)' flag marker.

        First pass prints whole devices ('mode' == 'dev') with their
        partitions; second pass prints raw member disks ('mode' == 'disk').
        Prints to stdout, returns None.
        """
        for adp in unused_raid:
            print("RAID Adapter #%s:%s" %(adp, unused_raid[adp]['adp_name']))

            for dev, dev_i in unused_raid[adp]['adp_dev'].iteritems():
                if dev_i['mode'] == 'dev':
                    type = dev_i['dev_info']['type'] + dev_i['raid_info']['raid']
                    media_type = dev_i['dev_info']['media_type']
                    interface = dev_i['dev_info']['interface']
                    size = dev_i['dev_info']['size']
                    #free = dev_i['dev_info']['free']
                    mount = dev_i['dev_info']['mount']

                    # smart_path (when enabled) replaces the raid/disk cache
                    # summary entirely.
                    cache = ''
                    if 'smart_path' in dev_i['raid_info']:
                        if dev_i['raid_info']['smart_path'] == 'enable':
                            cache = 'cache:smart_path'

                    if cache == '':
                        cache = 'raid_cache:' + dev_i['raid_info']['raid_cache']
                        cache += ' disk_cache:' + dev_i['raid_info']['disk_cache']

                    label = dev_i['dev_info']['label']

                    info = ''
                    if mount is not None:
                        info += 'mount:' + mount + ' '
                    info += cache
                    if label is not None and label != '':
                        info += ' label:' + label

                    if dev in usable_disk:
                        if dev_i['flag'] == 'lich':
                            info += ' (in)'
                        elif dev_i['flag'] == 'new':
                            info += ' (out)'
                        else:
                            info += ' (%s)' % dev_i['flag']

                    print("  %-6s %-9s %-4s %-4s %-9s %s" % (dev, type, media_type, interface, size, info))

                    # Nested partition lines, sorted by partition name.
                    if 'part_info' in dev_i.keys():
                        for part in sorted(dev_i['part_info']):
                            type = dev_i['part_info'][part]['type']
                            if type == 'extended':
                                print("    %s extended" % part)
                            else:
                                size = dev_i['part_info'][part]['size']
                                fs = dev_i['part_info'][part]['fs']
                                mount = dev_i['part_info'][part]['mount']
                                info = ''
                                if mount is not None:
                                    info += mount
                                if fs is None:
                                    fs = ''
                                print("    %-4s %-9s %-9s %-13s %s" %(part, type, size, fs, info))

            # Second pass: raw member disks (identified by inquiry string).
            for dev, dev_i in unused_raid[adp]['adp_dev'].iteritems():
                if dev_i['mode'] == 'disk':
                    type = dev_i['raid_info']['media_type']
                    size = dev_i['raid_info']['size']
                    slot = dev_i['raid_info']['slot']

                    foreign = dev_i['raid_info']['foreign']
                    if foreign == 'None':
                        foreign = ''
                    else:
                        foreign = '(' + foreign + ')'

                    print("  inq:%-30s type:%s size:%s slot:%s  %s" % (dev, type, size, slot, foreign))

    def __show_unused_dev(self, unused_dev, usable_disk):
        """Print plain (non-RAID) devices not (yet) used by lich.

        unused_dev  -- {dev: info} map for plain devices.
        usable_disk -- devices deemed takeable by __disk_list_getusable();
                       matching entries get a trailing '(usable)' marker.

        Prints to stdout, returns None.
        """
        # Defaults when the common cache is disabled; rendered below as
        # 'status:None' / 'cache_mode:None'.
        status = None
        cache_mode = None

        for dev in unused_dev:
            type = unused_dev[dev]['dev_info']['type']
            size = unused_dev[dev]['dev_info']['size']
            #free = unused_dev[dev]['dev_info']['free']
            mount = unused_dev[dev]['dev_info']['mount']
            cache = unused_dev[dev]['dev_info']['cache']
            if self.config.cache_enable:
                if self.commoncache.is_running_coredev(dev):
                    status = unused_dev[dev]['dev_info']['status']
                    cache_mode = unused_dev[dev]['dev_info']['cache_mode']
                else:
                    status = 'None'
                    cache_mode = 'None'

            label = unused_dev[dev]['dev_info']['label']
            info = ''
            if mount is not None:
                info += 'mount:' + mount + ' '
            if cache is None:
                info += 'cache:Notsupport'
            else:
                info += 'cache:' + cache
            if status is None:
                info += ' status:None'
            else:
                info += ' status:' + status
            if cache_mode is None:
                info += ' cache_mode:None'
            else:
                info += ' cache_mode:' + cache_mode
            if dev in usable_disk:
                info += ' (usable)'
            print("  %-6s %-9s %-9s %s" % (dev, type, size, info))
            if 'part_info' in unused_dev[dev]:
                for part in sorted(unused_dev[dev]['part_info']):
                    type = unused_dev[dev]['part_info'][part]['type']
                    if type == 'extended':
                        # BUGFIX: print the partition name, not the parent
                        # device, for extended partitions (mirrors
                        # __show_unused_raid's handling).
                        print("  %-6s extended" %(part))
                    else:
                        size = unused_dev[dev]['part_info'][part]['size']
                        fs = unused_dev[dev]['part_info'][part]['fs']
                        mount = unused_dev[dev]['part_info'][part]['mount']
                        info = ''
                        if mount is not None:
                            info += mount
                        # NOTE(review): fs may be None here and prints as
                        # 'None'; __show_unused_raid maps None -> '' —
                        # confirm whether the difference is intentional.
                        print("  %-4s %-9s %-9s %-13s %s" %(part, type, size, fs, info))

    def __disk_list_showall(self, all_disk):
        """Print the full inventory grouped as sysdev / used / unused.

        all_disk -- {dev: info} inventory as built by __disk_list_getall().

        Each group is further split into RAID-backed entries (keyed by
        adapter) and plain devices, then rendered by the matching
        __show_* helper.  Prints to stdout, returns None.
        """
        sys_disk = {}
        sys_raid = {}
        sys_dev = {}
        used_disk = {}
        used_raid = {}
        used_dev = {}
        unused_disk = {}
        unused_raid = {}
        unused_dev = {}

        usable_disk = self.__disk_list_getusable(all_disk, False)
        lich_tier = self.__get_lich_tier()
        lich_writeback = self.__get_lich_writeback()

        # pprint.pprint(all_disk)

        # First split by ownership flag.
        for disk in all_disk:
            if all_disk[disk]['flag'] == 'lich':
                used_disk[disk] = all_disk[disk]
            elif all_disk[disk]['flag'] == 'new':
                unused_disk[disk] = all_disk[disk]
            elif all_disk[disk]['flag'] == 'sys':
                sys_disk[disk] = all_disk[disk]

        if len(sys_disk) != 0:
            print("sysdev:")
            # Split system devices into RAID (grouped per adapter) vs plain.
            for disk in sys_disk:
                if 'raid_info' in sys_disk[disk].keys():
                    adp = sys_disk[disk]['raid_info']['adp']
                    if adp not in sys_raid.keys():
                        sys_raid[adp] = {}
                        sys_raid[adp]['adp_name'] = sys_disk[disk]['raid_info']['adp_name']
                        sys_raid[adp]['adp_dev'] = {}
                    sys_raid[adp]['adp_dev'][disk] = sys_disk[disk]
                else:
                    sys_dev[disk] = sys_disk[disk]

            self.__show_sys_dev(sys_dev)
            self.__show_sys_raid(sys_raid)

        if len(used_disk) != 0:
            print("used:")
            for disk in used_disk:
                if used_disk[disk]['type'].startswith('disk'):
                    # 'diskN' -> tier/writeback settings looked up by N.
                    disk_num = int(used_disk[disk]['type'][4:])
                    if disk_num in lich_tier:
                        used_disk[disk]['tier'] = str(lich_tier[disk_num])
                    else:
                        used_disk[disk]['tier'] = 'None'
                    if disk_num in lich_writeback:
                        used_disk[disk]['writeback'] = lich_writeback[disk_num]
                    else:
                        used_disk[disk]['writeback'] = {'cache':0, 'cached':0}
                    if 'raid_info' in used_disk[disk].keys():
                        adp = used_disk[disk]['raid_info']['adp']
                        if adp not in used_raid.keys():
                            used_raid[adp] = {}
                            used_raid[adp]['adp_name'] = used_disk[disk]['raid_info']['adp_name']
                            used_raid[adp]['adp_dev'] = {}
                        used_raid[adp]['adp_dev'][disk] = used_disk[disk]
                    else:
                        used_dev[disk] = used_disk[disk]
                elif used_disk[disk]['type'].startswith('cache'):
                    # Cache devices are always rendered as plain devices.
                    used_dev[disk] = used_disk[disk]

            self.__show_used_dev(used_dev)
            self.__show_used_raid(used_raid, used_disk)

        # pprint.pprint(unused_disk)

        if len(unused_disk) != 0:
            print("unused:")
            for disk in unused_disk:
                if 'raid_info' in unused_disk[disk].keys():
                    adp = unused_disk[disk]['raid_info']['adp']
                    if adp not in unused_raid.keys():
                        unused_raid[adp] = {}
                        unused_raid[adp]['adp_name'] = unused_disk[disk]['raid_info']['adp_name']
                        unused_raid[adp]['adp_dev'] = {}
                    unused_raid[adp]['adp_dev'][disk] = unused_disk[disk]
                else:
                    unused_dev[disk] = unused_disk[disk]

            self.__show_unused_dev(unused_dev, usable_disk)
            self.__show_unused_raid(unused_raid, usable_disk)

    def __disk_list_cachedev(self, all_disk, cachedev):
        """Print one cache device and all the core devices it backs.

        all_disk -- {dev: info} inventory as built by __disk_list_getall().
        cachedev -- device name of the cache device to report on.

        Prints to stdout, returns None.  Only entries whose type starts
        with 'disk' get a detail line.
        """
        lich_tier = self.__get_lich_tier()
        lich_writeback = self.__get_lich_writeback()
        lich_type = all_disk[cachedev]['type']
        disk_type = all_disk[cachedev]['dev_info']['type']
        size = all_disk[cachedev]['dev_info']['size']
        print("%-6s %-15s %-9s %-9s" %(lich_type, cachedev, disk_type, size))

        for dev in self.commoncache.list_coredevs_by_cachedev(cachedev):
            if all_disk[dev]['type'].startswith('disk'):
                # 'diskN' -> tier/writeback settings looked up by N.
                disk_num = int(all_disk[dev]['type'][4:])
                if disk_num in lich_tier:
                    all_disk[dev]['tier'] = str(lich_tier[disk_num])
                else:
                    all_disk[dev]['tier'] = 'None'
                if disk_num in lich_writeback:
                    all_disk[dev]['writeback'] = lich_writeback[disk_num]
                else:
                    all_disk[dev]['writeback'] = {'cache':0, 'cached':0}

            lich_type = all_disk[dev]['type']
            disk_type = all_disk[dev]['dev_info']['type']
            size = all_disk[dev]['dev_info']['size']
            if 'disk_stat' in all_disk[dev]:
                # disk_stat counters appear to be in MiB (scaled by
                # 1024*1024 before humanizing).
                free = (int(all_disk[dev]['disk_stat']['total']) -
                        int(all_disk[dev]['disk_stat']['used'])) * (1024*1024)
                free += (int(all_disk[dev]['disk_stat']['wbtotal']) -
                        int(all_disk[dev]['disk_stat']['wbused'])) * (1024*1024)
                free = _human_readable(free)
            else:
                free = all_disk[dev]['dev_info']['free']

            # NOTE(review): mount is read but never used below.
            mount = all_disk[dev]['dev_info']['mount']
            cache = all_disk[dev]['dev_info']['cache']
            label = all_disk[dev]['dev_info']['label']
            info = ''
            if lich_type.startswith('disk'):
                status = all_disk[dev]['dev_info']['status']
                cache_mode = all_disk[dev]['dev_info']['cache_mode']
                lich_pool = all_disk[dev]['pool']
                info += 'tier:' + all_disk[dev]['tier']
                if 'disk_stat' in all_disk[dev]:
                    if all_disk[dev]['disk_stat']['cache'] != '0':
                        info += ' writeback:' + _human_readable(int(all_disk[dev]['disk_stat']['wbtotal']) * (1024*1024))
                        if all_disk[dev]['disk_stat']['cached'] == '1':
                            info += ',Enabled'
                        else:
                            info += ',Disabled'
                else:
                    # Writeback size as a percentage of device size.
                    if all_disk[dev]['writeback']['cache'] != '0':
                        info += ' writeback:' + _human_readable(_human_unreadable(size) * \
                                int(all_disk[dev]['writeback']['cache']) / 100)
                        if all_disk[dev]['writeback']['cached'] == 1:
                            info += ',Enabled'
                        else:
                            info += ',Disabled'

                if cache is None:
                    info += ' cache:Notsupport'
                else:
                    info += ' cachedev:' + cache

                if label is not None and label != '':
                    info += ' label:' + label

                if status is None:
                    info += ' status:None'
                else:
                    info += ' status:' + status

                if cache_mode is None:
                    info += ' cache_mode:None'
                else:
                    info += ' cache_mode:' + cache_mode

                print("%-6s %-15s %-9s %-9s %-9s free:%-8s %s " %(lich_type, dev, lich_pool, disk_type, size, free, info))

    def list_all_used_disk(self):
        """Return the list of devices whose flag marks them as used by lich."""
        all_disk = self.__disk_list_getall()
        return [dev for dev in all_disk if all_disk[dev]['flag'] == 'lich']

    def disk_list(self, is_all, cachedev, is_json, verbose):
        self.__disk_check_env()

        all_disk = self.__disk_list_getall(is_all)
        if is_json:
            print json.dumps(all_disk)
        elif verbose:
            print json.dumps(all_disk, sort_keys=False, indent=4)
        else:
            if self.config.cache_enable and cachedev is not None:
                self.__disk_list_cachedev(all_disk, cachedev)
            else:
                self.__disk_list_showall(all_disk)

    def disk_list_with_return_json_value(self):
        """Return the full disk inventory serialized as a json string."""
        self.__disk_check_env()
        return json.dumps(self.__disk_list_getall())

    def disk_speed(self, devs):
        """Measure (and print) the speed of each device in devs."""
        for device in devs:
            self.disk.get_dev_speed(device, True)

    @staticmethod
    def _bind2uio(dev, pci, max_retry=60):
        """Bind a pci address to the uio driver and poll until it takes effect.

        Raises Exp after max_retry one-second polls without a binding.
        """
        udev = UIODevice(pci)
        udev.bind()

        attempt = 0
        while not udev.is_binding():
            attempt += 1
            if attempt > max_retry:
                raise Exp(errno.EINVAL, '_bind2uio fail: dev %s pci %s retry %d' % (dev, pci, attempt))

            time.sleep(1)

    def __nvme2spdk(self, dev):
        '''
        Detach a NVMe device from the kernel driver and bind it to uio for SPDK.

        echo '0000:05:00.0' > /sys/bus/pci/drivers/nvme/unbind

        Returns the original pci address (dot-separated form).
        '''
        pci = self.disk.get_dev_pci(dev)
        pci2 = pci.replace('.', ':', 2)

        attempt = 0
        while True:
            _exec_system("echo '%s' > /sys/bus/pci/drivers/nvme/unbind" % pci2)
            # the /dev node disappearing means the unbind succeeded
            if not os.path.exists(dev):
                break

            attempt += 1
            if attempt > self.wait_for_device_interval:
                raise Exp(errno.EINVAL, 'nvme to spdk fail: dev %s pci %s retry %d' % (dev, pci2, attempt))

            time.sleep(1)

        self._bind2uio(dev, pci2, max_retry=self.wait_for_device_interval)

        return pci

    def __spdk2nvme(self, pci):
        '''
        Rebind a pci device from SPDK/uio back to the kernel nvme driver.

        echo '0000:05:00.0' > /sys/bus/pci/drivers/nvme/bind

        Returns the /dev node that reappears, or raises Exp on timeout.
        '''
        pci2 = pci.replace('.', ':', 2)

        udev = UIODevice(pci2)
        udev.unbind()

        _exec_system("echo '%s' > /sys/bus/pci/drivers/nvme/bind" % pci2)

        # bug fix: dev must be initialized before the loop -- if
        # wait_for_device_interval is 0 the body never runs and the
        # `if dev is None` below would raise NameError instead of the
        # intended Exp.
        dev = None
        retry = 0
        while retry < self.wait_for_device_interval:
            time.sleep(1)
            dev = self.disk.get_dev_bypci(pci2)
            if dev is not None:
                break

            retry += 1

        if dev is None:
            raise Exp(errno.EINVAL, 'spdk to nvme fail: pci %s' % pci2)

        return dev

    def __spdk2uio(self, pci):
        """Bind a SPDK pci address (dot form converted to colon form) to uio."""
        UIODevice(pci.replace('.', ':', 2)).bind()

    def __disk_add_check(self, devs, force, cache, pool):
        """Validate every device in devs before it is added to lich.

        Raises Exp when a device is mounted, already used, a partition,
        a system disk, or of an unsupported type.  NOTE: mutates `devs`
        in place -- NVMe entries are swapped for pci addresses when SPDK
        mode is enabled, and SPDK entries for /dev names when it is not.
        Creates the pool only when it is the implicit 'default' pool.
        """
        instance = self.node.instences[0]
        if not instance.running():
            raise Exp(errno.EINVAL, 'lichd not running')

        self.__disk_check_env()
        lich_disk = self.__get_lich_disk()
        sys_dev = self.disk.get_sys_dev(False)
        sys_dev_part = self.disk.get_sys_dev(True)
        add_disk = []
        for dev in devs:
            if self.disk.dev_check_mounted(dev) and not force:
                raise Exp(errno.EINVAL, '%s or partition was mounted, please use --force' % dev)

            if not self.disk.is_block(dev):
                raise Exp(errno.EINVAL, '%s is not block device' % dev)
            if dev in [disk[1] for disk in lich_disk]:
                raise Exp(errno.EINVAL, '%s already used by lich' % dev)
            if dev in sys_dev or dev in sys_dev_part:
                raise Exp(errno.EINVAL, '%s mounted on /' % dev)
            if self.disk.is_mounted(dev) is not None and not force:
                raise Exp(errno.EINVAL, '%s mounted, please use --force' % dev)

            if self.disk.is_part(dev) and not force:
                raise Exp(errno.EINVAL, 'dev %s is a partition' %(dev))

            dev_parts = self.disk.get_dev_parts(dev)
            if len(dev_parts) and not force:
                raise Exp(errno.EINVAL, 'dev %s has partitions %s' %(dev, dev_parts))

            dev_info = self.disk.get_dev_info(dev)
            dev_type = dev_info['type']
            # ISCSI is always refused; UNKNOW is only accepted with --force
            if dev_type == 'ISCSI':
                raise Exp(errno.EINVAL, 'can not add disk %s type(%s) to lich' % (dev, dev_type))
            if dev_type == 'UNKNOW' and not force:
                raise Exp(errno.EINVAL, 'can not add disk %s type(%s) to lich' % (dev, dev_type))

            if dev_type == 'RAID':
                # only LSI and Hewlett-Packard raid cards are accepted
                # without --force
                if not force and \
                        not dev_info['raidcard'].startswith('LSI') and \
                        not dev_info['raidcard'].startswith('Hewlett-Packard'):
                    raise Exp(errno.EINVAL, 'not support %s raidcard: %s' %(dev, dev_info['raidcard']))
                if not self.raid:
                    self.raid = RAID(self.config)

                dev_info = self.raid.raid_info(dev)
                if dev_info is None:
                    return
                disk_info = self.raid.disk_info(dev_info['disk'][0])
                disk_type = disk_info['media_type']
                # a non-zero cache ratio requires an SSD unless forced
                if cache != 0 and disk_type != 'SSD' and not force:
                    raise Exp(errno.EINVAL, 'disk %s type %s can not be set to cache' %(dev, disk_type))

                if self.cacheconf is None:
                    self.cacheconf = self.parse_cacheconf(self.config.cacheconf, devs)
                if disk_type == 'SSD':
                    self.raid.raid_check([self.disk.get_dev(dev)], self.cacheconf, True)
                else:
                    self.raid.raid_check([self.disk.get_dev(dev)], self.cacheconf, force)
            elif dev_type == 'NVMe':
                if self.config.spdk:
                    # replace the /dev name with the pci address in place
                    pci = self.__nvme2spdk(dev)
                    idx = devs.index(dev)
                    devs.pop(idx)
                    devs.insert(idx, pci)
            elif dev_type == 'SPDK':
                if self.config.spdk:
                    self.__spdk2uio(dev)
                else:
                    # SPDK disabled: hand the device back to the nvme driver
                    pci = self.__spdk2nvme(dev)
                    if pci is None:
                        raise Exp(errno.EINVAL, 'disk %s add fail, retry again' %(dev))
                    idx = devs.index(dev)
                    devs.pop(idx)
                    devs.insert(idx, pci)
            else:
                if not force and \
                        dev_info['cache'] != 'Enabled' and \
                        dev_info['cache'] != 'Disabled':
                    raise Exp(errno.EINVAL, 'not support %s cache: %s' %(self.disk.get_dev(dev), dev_info['cache']))
                if cache != 0 and dev_type != 'SSD' and not force:
                    raise Exp(errno.EINVAL, 'disk %s type %s can not be set to cache' %(dev, dev_type))

                self.__disk_check_cache({dev_type:[self.disk.get_dev(dev)]}, True)

            if dev in add_disk:
                raise Exp(errno.EINVAL, '%s repeat' % dev)
            else:
                add_disk.append(dev)

        try:
            self.__check_lich_speed()
            self.__check_lich_rotation()
        except:
            # speed/rotation sanity failures are fatal unless forced
            if not force:
                raise

        # the target pool must already exist; only 'default' is created here
        pool_list = self.pool_manage.pool_list()
        if pool != "default" and pool not in pool_list:
            raise Exp(errno.EPERM, "pool %s not found" % pool)
        elif pool == "default" and pool not in pool_list:
            self.pool_manage.pool_create(pool)

    def __disk_tier_adjust(self, pool, disk_num = None, disk_speed = None, p=False):
        lich_tier = self.__get_lich_tier()
        lich_speed = self.__get_lich_speed()
        lich_mask = {}
        idx_tier = -1
        cur_tier = -1

        for disk in lich_tier:
            if disk in lich_speed:
                lich_mask[disk] = self.__get_disk_tiermask(lich_speed[disk])

        if disk_num is not None and disk_speed is not None:
            lich_mask[disk_num] = self.__get_disk_tiermask(disk_speed)
        lich_mask = self.__get_disk_tier(lich_mask)

        sort_tier = sorted(lich_mask.items(), key=lambda d: d[1])
        for disk in sort_tier:
            tier = disk[1]
            if tier != cur_tier:
                cur_tier = tier
                if idx_tier != DISK_MAX_TIER:
                    idx_tier += 1
            lich_tier[disk[0]] = idx_tier

        if p:
            print "tier:", sorted(lich_tier.items(), key=lambda d: d[1])
        return lich_tier

    def __disk_tier_adjust_new(self, pool, disk_num = None, disk_speed = None, disk_rotation = None, p=False):
        lich_link = self.get_pool_link(pool)
        lich_tier = self.__get_pool_tier(pool)
        lich_speed = self.__get_pool_speed(pool)
        lich_rotation = self.__get_pool_rotation(pool)
        lich_mask = {}
        idx_tier = -1
        cur_tier = -1

        if disk_num is not None and disk_speed is not None:
            lich_speed[disk_num] = disk_speed
            lich_link[disk_num] = None
        if disk_num is not None and disk_rotation is not None:
            lich_rotation[disk_num] = disk_rotation
            lich_link[disk_num] = None

        lich_mask = self.__get_disk_tiermask_withspeed(lich_speed)
        #if lich_mask.keys() == lich_link.keys() and \
        #        lich_rotation.keys() == lich_link.keys():
        #    lich_mask = self.__get_disk_tiermask_withrotation(lich_mask, lich_rotation)
        lich_mask = self.__get_disk_tier_withmask(lich_mask)

        sort_tier = sorted(lich_mask.items(), key=lambda d: d[1])
        for disk in sort_tier:
            tier = disk[1]
            if tier != cur_tier:
                cur_tier = tier
                if idx_tier != DISK_MAX_TIER:
                    idx_tier += 1
            lich_tier[disk[0]] = idx_tier

        if p:
            print "tier:", sorted(lich_tier.items(), key=lambda d: d[1])
        return lich_tier

    def __disk_tier_update(self, lich_tier):
        """Persist each disk's tier to <tier_path>/<num>.tier."""
        for num, tier in lich_tier.items():
            path = os.path.join(self.tier_path, str(num) + ".tier")
            _exec_system("echo %d > %s" % (tier, path), False)

    def __disk_add_speed(self, disk_num, disk_speed):
        """Persist a disk's measured speed to <speed_path>/<num>.speed."""
        path = os.path.join(self.speed_path, str(disk_num) + ".speed")
        _exec_system("echo %d >  %s" % (disk_speed, path))

    def __disk_add_rotation(self, disk_num, disk_rotation):
        """Persist a disk's rotation rate, if one was detected."""
        if disk_rotation is None:
            return
        path = os.path.join(self.rotation_path, str(disk_num) + ".rotation")
        _exec_system("echo %d >  %s" % (disk_rotation, path))

    def __disk_add_tier_withspeed(self, pool, disk_num, disk_speed, disk_rotation):
        """Re-tier the pool with the new disk folded in, then persist its records."""
        adjusted = self.__disk_tier_adjust_new(pool, disk_num, disk_speed, disk_rotation, True)

        self.__disk_tier_update(adjusted)
        self.__disk_add_speed(disk_num, disk_speed)
        self.__disk_add_rotation(disk_num, disk_rotation)

    def __disk_add_tier_withtype(self, disk_num, disk_type):
        lich_tier = 0 if disk_type == 'SSD' else 1
        print "add disk %s tier %s" %(disk_num, lich_tier)
        self.__disk_tier_update({disk_num:lich_tier})

    def __disk_add_link(self, dev, disk_num, pool):
        '''ln -s /dev/sda /opt/fusionstack/data/disk/0.disk'''

        disk_path = self.lichDisk.get_path(disk_num, 'disk')
        if self.disk.get_dev_type(dev) == 'SPDK':
            # SPDK devices have no /dev block node: create a placeholder
            # file under the nvme prefix and link to that instead
            target = os.path.join(self.lichDisk.nvme_prefix(), LichDisk.nvme_device(pool, disk_num, dev))
            ensure_path(self.lichDisk.nvme_prefix())
            _exec_system("touch " + target)
        else:
            target = dev
        _exec_system("ln -s %s %s" % (target, disk_path))

    def __disk_add_tier(self, pool, dev, disk_num):
        if self.tier_withtype:
            dev_type = self.disk.get_dev_type(dev)
            if dev_type == 'RAID':
                if not self.raid:
                    self.raid = RAID(self.config)
                dev_info = self.raid.raid_info(dev)
                disk_info = self.raid.disk_info(dev_info['disk'][0])
                disk_type = disk_info['media_type']
            else:
                disk_type = dev_type
            return self.__disk_add_tier_withtype(disk_num, disk_type)
        else:
            ''' get disk speed must before add link, because get speed need write test '''
            print "get %s speed start.." % dev
            disk_speed = self.disk.get_dev_speed(dev)
            print "get %s speed %d" % (dev, disk_speed)

            disk_rotation = self.__get_disk_rotation(dev)
            return self.__disk_add_tier_withspeed(pool, disk_num, disk_speed, disk_rotation)

    def __disk_add_tier_retry(self, pool, dev, disk_num, retry=3):
        """Attempt __disk_add_tier up to `retry` times, one second apart.

        Returns True on success, False when every attempt failed.
        """
        done = False
        while not done and retry > 0:
            try:
                self.__disk_add_tier(pool, dev, disk_num)
                done = True
            except Exception:
                # bug fix: was a bare `except:`, which also swallowed
                # SystemExit and KeyboardInterrupt; Exception keeps the
                # best-effort retry without trapping interpreter exits.
                _dwarn("__disk_add_tier pool %s dev %s disk %s retry %d" % (pool, dev, disk_num, retry))
                traceback.print_exc()

                retry -= 1
                time.sleep(1)

        return done

    def __disk_add_block(self, dev, disk_num, pool, cache, cset_uuid):
        """Stamp the lich superblock metadata onto a freshly added device."""
        self.bdev.write_meta(dev, {
            'cluster': self.config.getclusteruuid(),
            'node': self.config.hostname,
            'type': 'new',
            'disk': disk_num,
            'pool': pool,
            'cache': cache,
            'cached': 0,            # not attached to a cache set yet
            'cset': cset_uuid,
        })

    def __disk_add_disk(self, devs, force, cache, pool, cset_uuid):
        """Link, tier, and stamp each device in devs as a new lich disk.

        Devices that already carry a valid lich superblock are skipped
        with a warning telling the operator to restart lichd or wipe
        the disk manually.
        """
        for dev in devs:
            if self.disk.is_mounted(dev) is not None and force:
                self.disk.dev_umount(dev)
            elif self.disk.is_mounted(dev):
                raise Exp(errno.EINVAL, '%s mounted, please use --force' % dev)

            # NOTE(review): `new` is never set to False below, so the two
            # `if new:` guards always run -- looks like a leftover flag.
            new = True

            match = self.__disk_load_valid(dev)
            if match:
                if match[0].startswith('disk'):
                    _dwarn("disk %s used by lich %s, please restart lichd, "
                            "or cleanup disk use `dd if=/dev/zero of=%s bs=1M count=1'" % (dev, match[0], dev))
                    continue
                else:
                    _dwarn('disk %s used by lich %s, please restart lichd' % (dev, match[0]))
                    continue

            if new:
                disk_num = self.lichDisk.get_next_disk_num()

            dev_type = self.disk.get_dev_type(dev)
            if dev_type == 'SPDK':
                try:
                    # lichd will fnotify
                    self.__disk_add_link(dev, disk_num, pool)
                except Exception, e:
                    _derror("%s add link failed:%s" % (dev, e))
                    raise
                # SPDK devices get no tier or superblock handling here
                continue

            # presumably cache == 100 marks a pure cache disk that needs
            # no data tier -- TODO confirm against callers
            if cache != 100:
                try:
                    self.__disk_add_tier(pool, self.disk.get_dev(dev), disk_num)
                except Exception, e:
                    raise

            if new:
                self.__disk_add_block(dev, disk_num, pool, cache, cset_uuid)

            try:
                # lichd will fnotify
                self.__disk_add_link(dev, disk_num, pool)
            except Exception, e:
                _derror("%s add link failed:%s" % (dev, e))
                raise

    def __disk_add_wait(self):
        """Return True while any linked disk still lacks its info or bitmap file."""
        disk_path = os.path.join(self.config.home, 'data/disk/disk')

        for entry in os.listdir(disk_path):
            num = entry.split('.')[0]
            if not self.lichDisk.is_info_exists(num) or \
                    not self.lichDisk.is_bitmap_exists(num):
                return True

        return False

    def __disk_add_reset(self):
        """Wait for all disks to finish initializing, then restart lichd with a clean hsm."""
        while self.__disk_add_wait():
            time.sleep(1)

        instance = self.node.instences[0]
        instance.stop()
        # drop the shared-memory hsm state so it is rebuilt on start
        _exec_system("rm -rf %s/hsm" % self.config.shm)
        instance.start()

    def disk_add(self, devs, v, force, cache, pool, cset_uuid=ZERO_CSET_UUID):
        """Add devices to lich; 'all' expands to every usable disk.

        Validates the devices, adds them under an exclusive file lock,
        and restarts lichd when the pool's tier layout changed.
        """
        if len(devs) == 1 and devs[0] == 'all':
            all_disk = self.__disk_list_getall()
            devs = self.__disk_list_getusable(all_disk, force)
            if len(devs):
                _dmsg("add disk %s" % devs)
            else:
                _dmsg("no usable disk found!")

        if len(devs):
            if pool is None:
                pool = 'default'

            self.__disk_add_check(devs, force, cache, pool)

            fd = _lock_file1("/var/run/add_disk.lock")
            try:
                old_tier = self.__get_pool_tier(pool)
                self.__disk_add_disk(devs, force, cache, pool, cset_uuid)
                new_tier = self.__get_pool_tier(pool)
            finally:
                # bug fix: previously the lock leaked if __disk_add_disk
                # raised, blocking every later disk_add on this host
                _unlock_file1(fd)

            for tier in old_tier:
                if new_tier[tier] != old_tier[tier]:
                    self.__disk_add_reset()
                    break

    def __disk_del_check(self, devs):
        """Validate that each dev is a lich data disk and listed only once."""
        lich_disk = self.__get_lich_disk()
        used = [disk[1] for disk in lich_disk]
        data = [disk[1] for disk in lich_disk if disk[0].startswith('disk')]

        seen = []
        for dev in devs:
            if dev not in used:
                raise Exp(errno.EINVAL, '%s not used by lich' % dev)
            elif dev not in data:
                raise Exp(errno.EINVAL, 'only support delete data disk')
            if dev in seen:
                raise Exp(errno.EINVAL, '%s repeat' % dev)
            seen.append(dev)

    def __disk_del_superblock(self, dev):
        """Zero the first FILE_BLOCK_LEN bytes so lich no longer recognizes dev."""
        with open(dev, 'wb') as fd:
            fd.write('\0' * FILE_BLOCK_LEN)

    def __disk_del_wait(self, dev, disk_num):
        """Block until the disk's info file disappears, printing a countdown.

        Raises Exp(EPERM) when the 10-hour timeout expires.
        """
        timeout = 10*60*60
        elapsed = 0
        while elapsed < timeout:
            if not self.lichDisk.is_info_exists(disk_num):
                return

            # overwrite the same terminal line with the remaining seconds
            sys.stdout.write(str(timeout - int(elapsed)) + "\r")
            sys.stdout.flush()
            elapsed += 1
            time.sleep(1)

        raise Exp(errno.EPERM, "delete disk %s failed" % dev)

    def __disk_del_info(self, disk_num, p=True):
        """Remove the persisted tier/speed/rotation records of a disk."""
        for kind in ('tier', 'speed', 'rotation'):
            path = self.lichDisk.get_path(disk_num, kind)
            if not os.path.exists(path):
                continue
            if _exec_system("rm -rf " + path, p):
                _derror("delete %s failed." % path)

    def __disk_del_link(self, disk_num):
        '''rm -rf /opt/fusionstack/data/disk/0.disk'''
        path = os.path.join(self.disk_home, str(disk_num) + ".disk")

        # TODO if NVMe
        if not os.path.exists(path):
            return

        # SPDK placeholders are regular files named pci_*: remove them too
        rpath = os.path.realpath(path)
        if os.path.isfile(rpath) and os.path.basename(rpath).startswith('pci_'):
            _exec_system("rm -rf " + rpath)

        _exec_system("rm -rf " + path)

    def __disk_del_spdk(self, dev, disk_num, pool):
        """Remove the SPDK placeholder file for this disk."""
        name = LichDisk.nvme_device(pool, disk_num, dev)
        _exec_system("rm -rf " + os.path.join(self.lichDisk.nvme_prefix(), name))

    def __disk_del_disk(self, devs):
        """Cast off each device from the node and wipe its superblock."""
        lich_disk = self.__get_lich_disk()
        for dev in devs:
            disk_num = None
            for lich_dev in lich_disk:
                if dev == lich_dev[1]:
                    # lich_dev[0] looks like 'disk12' -> extract the number
                    m = re.match('(\D+)(\d+)', lich_dev[0])
                    if m is not None:
                        disk_num = m.group(2)

            #cmd = "%s --castoff %s/%s"%(self.config.admin, self.node.instences[0].name, disk_num)
            #ret = _exec_system(cmd)
            #if ret:
            #    _derror("%s delete failed." % dev)
            #    continue

            # NOTE(review): if dev did not match any lich disk above,
            # disk_num stays None and int(None) raises TypeError; callers
            # appear to guarantee membership via __disk_del_check -- confirm.
            self.node.node_drop(int(disk_num))

            self.__disk_del_info(disk_num, False)

            #lich_tier = self.__disk_tier_adjust()
            #self.__disk_tier_update(lich_tier)

            self.__disk_del_superblock(dev)

    def __disk_del_dev_disk(self, devs):
        """Remove the <num>.disk links of the given devices (no castoff)."""
        lich_disk = self.__get_lich_disk(False)
        # i.e. [('disk0', '/dev/sdb', 'p1')]
        for dev in devs:
            for lich_dev in lich_disk:
                if dev != lich_dev[1]:
                    continue
                m = re.match('(\D+)(\d+)', lich_dev[0])
                if m is None:
                    continue
                disk_num = m.group(2)

                dev_disk_path = os.path.join(self.disk_home, disk_num + '.disk')
                if os.path.exists(dev_disk_path):
                    if _exec_system("rm -rf " + dev_disk_path, False):
                        _derror("delete %s tier failed." % dev)

    def disk_del(self, devs, v):
        """Delete disks: full castoff when configured, otherwise just drop links."""
        if self.config.drop_with_castoff:
            self.__disk_del_check(devs)
            self.__disk_del_disk(devs)
        else:
            self.__disk_del_dev_disk(devs)

    def __disk_check_cache(self, devs, force=False, setcache=True):
        """Compare each dev's on-disk cache setting against cacheconf.

        With setcache=True mismatches are corrected and {} is returned;
        with setcache=False a {dev: {'disk_cache': wanted}} map of
        mismatches is returned instead.
        """
        if self.cacheconf is None:
            self.cacheconf = self.parse_cacheconf(self.config.cacheconf)
        if len(self.cacheconf) == 0:
            return {}

        cache_stat = {}
        for dev_type in devs:
            for dev in devs[dev_type]:
                # per-device entries override the per-type entry
                disk = dev if dev in self.cacheconf else dev_type.lower()

                if disk == 'unknow':
                    if not force:
                        raise Exp(errno.EINVAL, 'not support %s type: %s' %(dev, disk))
                    continue
                if disk == 'nvme':
                    continue

                conf = self.cacheconf[disk]
                if conf.get('skip'):
                    continue
                disk_cache = conf.get('disk_cache', 'disable')

                dev_cache = self.disk.get_dev_cache(dev)
                if dev_cache is None or dev_cache.lower() == disk_cache:
                    continue
                if setcache:
                    _dmsg("set %s cache to %s" % (dev, disk_cache))
                    self.disk.set_dev_cache(dev, disk_cache, force)
                else:
                    cache_stat[dev] = {'disk_cache': disk_cache}

        return cache_stat

    def __disk_check_split(self):
        """Split lich-used and system devices into raid volumes and HDD/SSD lists.

        Returns (raid_devs, {'HDD': [...], 'SSD': [...]}); both empty in
        testing mode.
        """
        raid_devs = []
        hdd_devs = {'HDD': [], 'SSD': []}
        if (self.config.testing):
            return raid_devs, hdd_devs

        def classify(dev):
            # HBA-attached devices are left alone; unknown types ignored
            if self.disk.is_hba(dev):
                return
            dev_type = self.disk.get_dev_type(dev)
            if dev_type == 'RAID' and dev not in raid_devs:
                raid_devs.append(dev)
            elif dev_type in ('HDD', 'SSD') and dev not in hdd_devs[dev_type]:
                hdd_devs[dev_type].append(dev)

        for (x, dev, pool) in self.__get_lich_disk():
            if not dev:
                continue
            dev = self.disk.get_dev(dev)
            if dev is None:
                continue
            classify(dev)

        for dev in self.disk.get_sys_dev():
            classify(dev)

        return raid_devs, hdd_devs

    def __disk_check_health(self):
        if not self.raid:
            self.raid = RAID(self.config)

        raid_info = {}
        if self.raid.raid_type == 'MegaRAID':
            mega_raid = MegaRAID()
            raid_info = mega_raid.get_all_ldpdinfo()

        disk_info = self.disk_list_with_return_json_value()
        all_disks = json.loads(disk_info)

        used_disk = {}
        for disk in all_disks:
            if all_disks[disk]['flag'] == 'lich':
                used_disk[disk] = all_disks[disk]

        for disk in used_disk:
            if 'raid_info' in used_disk[disk].keys():
                if used_disk[disk]['raid_info']['adp_type'] == 'LSI':
                    (adpid, dev_vd) = mega_raid.get_dev_vd(disk)
                    cmd = 'smartctl -d megaraid,%s %s -A' % (raid_info[adpid][dev_vd], disk)
                    cmd = cmd.split()
                    try:
                        (out_msg, err_msg) = _exec_pipe1(cmd, 0, False)
                    except Exp, e:
                        _syswarn(" smartctl, %s" % err_msg)
                        continue

                else:
                    _syswarn(" 3007  Don't support raid type: %s(%s)" % (used_disk[disk]['raid_info']['adp_type'], disk))
                    continue

            else:
                cmd = 'smartctl -i %s -A' % disk
                cmd = cmd.split()
                try:
                    (out_msg, err_msg) = _exec_pipe1(cmd, 0, False)
                except Exp, e:
                    _syswarn(" smartctl, %s" % err_msg)
                    continue

            num = out_msg.count('\n')
            lines = out_msg.splitlines(num)
            for line in lines:
                if 'Reallocated_Sector_Ct' in line:
                    if int(line.split()[-1]) > 0:
                        _syserror(' 3006  %s:%s' % (disk.split('/')[-1], line))

    def disk_check(self, arg):
        """Dispatch the `disk check` subcommands.

        arg is one of: cache (fix disk cache settings), cacheset (set),
        cachestat (report, returns a dict), tier, writeback, speed,
        rotation, health.  Raises Exp(EINVAL) for anything else.
        """
        if self.cacheconf is None:
            self.cacheconf = self.parse_cacheconf(self.config.cacheconf)

        if arg == 'cache':
            # force-correct the cache setting on raid and plain devices
            if not self.raid:
                self.raid = RAID(self.config)

            (raid_devs, hdd_devs) = self.__disk_check_split()

            # pprint.pprint(raid_devs)
            # pprint.pprint(hdd_devs)

            if len(raid_devs) != 0:
                self.raid.raid_check(raid_devs, self.cacheconf, True)

            if len(hdd_devs) != 0:
                self.__disk_check_cache(hdd_devs, True)
        elif arg == 'cacheset':
            if not self.raid:
                self.raid = RAID(self.config)

            (raid_devs, hdd_devs) = self.__disk_check_split()

            if len(raid_devs) != 0:
                self.raid.raid_check(raid_devs, self.cacheconf, True)

            if len(hdd_devs) != 0:
                self.__disk_check_cache(hdd_devs)
        elif arg == 'cachestat':
            # report-only mode: collect mismatches without changing anything
            cachestat = {}
            if not self.raid:
                self.raid = RAID(self.config)
            (raid_devs, hdd_devs)  = self.__disk_check_split()

            if len(raid_devs) != 0:
                stat = self.raid.raid_check(raid_devs, self.cacheconf, True, False)
                cachestat.update(stat)
            if len(hdd_devs) != 0:
                stat = self.__disk_check_cache(hdd_devs, True, False)
                cachestat.update(stat)

            return cachestat
        elif arg == 'tier':
            # compare the stored tiers of each pool with a recomputation
            pool = self.pool_manage.pool_list()
            for p in pool:
                lich_tier = self.__get_pool_tier(p)
                if self.tier_withtype:
                    print p, ":", lich_tier
                else:
                    tier_adjust = self.__disk_tier_adjust_new(p)
                    if lich_tier == tier_adjust:
                        print p, ":", lich_tier
                    else:
                        print p, "tier:", lich_tier, "should:", tier_adjust
        elif arg == 'writeback':
            print self.__get_lich_writeback()
        elif arg == 'speed':
            pool = self.pool_manage.pool_list()
            for p in pool:
                print p, ":", self.__get_pool_speed(p)
        elif arg == 'rotation':
            pool = self.pool_manage.pool_list()
            for p in pool:
                print p, ":", self.__get_pool_rotation(p)
        elif arg == 'health':
            self.__disk_check_health()
        else:
            raise Exp(errno.EINVAL, '%s is invalid argument, use --help for help' % arg)

    def raid_add(self, devs, force):
        """Create raid volumes for the given disks ('all' = every free raid disk).

        Returns the list of new block devices that appeared after the adds.
        """
        if not self.raid:
            self.raid = RAID(self.config)

        if (len(devs) == 1 and devs[0] == 'all'):
            devs = []
            new_raid_disk = self.raid.disk_list()
            if new_raid_disk:
                for adp, disks in new_raid_disk.iteritems():
                    for disk in disks:
                        devs.append(disks[disk]['inq'])
                _dmsg("add raid %s" % devs)
            else:
                # fix: the old code nested `if len(new_raid_disk)` inside the
                # truthy branch, so its else was unreachable; one check is
                # enough.
                _dmsg("no valid disk found!")

        # diff the device list around each add to discover the new nodes
        new_disk = []
        all_disk = self.disk.get_all_devs()
        for dev in devs:
            self.raid.raid_add(dev, force)
            now_disk = self.disk.get_all_devs()
            for disk in now_disk:
                if disk not in all_disk:
                    new_disk.append(disk)
            all_disk = now_disk

        return new_disk

    def __raid_del_check(self, devs, force):
        """Refuse deletion of non-raid, lich-used, system, or mounted devices."""
        lich_used = [disk[1] for disk in self.__get_lich_disk()]
        sys_dev = self.disk.get_sys_dev()
        for dev in devs:
            if not self.disk.is_dev(dev):
                raise Exp(errno.EINVAL, "%s is not block device" % dev)
            if self.disk.get_dev_type(dev) != 'RAID':
                raise Exp(errno.EPERM, 'can not del disk %s, maybe not raid disk' % dev)
            if dev in lich_used:
                raise Exp(errno.EINVAL, '%s used by lich' % dev)
            if dev in sys_dev:
                raise Exp(errno.EINVAL, "can not delete system device")
            if self.disk.is_mounted(dev) is not None and not force:
                raise Exp(errno.EINVAL, '%s mounted, please use --force' % dev)

    def __raid_del_disk(self, devs, force):
        """Unmount (only with force) and delete each raid volume."""
        if not self.raid:
            self.raid = RAID(self.config)
        for dev in devs:
            mounted = self.disk.is_mounted(dev) is not None
            if mounted and force:
                self.disk.dev_umount(dev, True)
            elif mounted:
                raise Exp(errno.EINVAL, '%s mounted, please use --force' % dev)
            self.raid.raid_del(dev, force)

    def raid_del(self, devs, force):
        """Validate, then delete, the given raid volumes."""
        self.__raid_del_check(devs, force)
        self.__raid_del_disk(devs, force)

    def raid_miss(self):
        """Delegate to the raid layer's missing-volume report."""
        if not self.raid:
            self.raid = RAID(self.config)
        return self.raid.raid_miss()

    def raid_load(self, clear=False):
        if not self.raid:
            self.raid = RAID(self.config)

        try:
            return self.raid.raid_import(clear)
        except Exp, e:
            # _dwarn(e.err)
            return False

    def raid_flush(self):
        """Flush the raid controller's cache."""
        if not self.raid:
            self.raid = RAID(self.config)
        self.raid.raid_flush()

    def __raid_cache_check(self, devs):
        """Only RAID volumes may have their controller cache toggled."""
        for dev in devs:
            if self.disk.get_dev_type(dev) != 'RAID':
                raise Exp(errno.EPERM, 'can not set disk %s raid cache' % dev)

    def raid_cache(self, switch, devs, policy):
        """Toggle the raid controller cache on devs (defaults to every raid dev)."""
        if not self.raid:
            self.raid = RAID(self.config)
        if len(devs) == 0:
            # no explicit devices: collect every raid volume, lich or system
            (raid_devs, hdd_devs) = self.__disk_check_split()
            for dev in self.disk.get_sys_dev():
                if self.disk.get_dev_type(dev) == 'RAID' and dev not in raid_devs:
                    raid_devs.append(dev)
            devs = raid_devs

        if not policy:
            self.__raid_cache_check(devs)
        self.raid.raid_cache(switch, devs, self.cacheconf, policy)

    def __disk_light_check(self, devs):
        """Only /dev paths backed by RAID may have their locate light toggled."""
        for path in devs:
            if not path.startswith('/dev/'):
                continue
            if self.disk.get_dev_type(path) != 'RAID':
                raise Exp(errno.EPERM, 'can not set disk %s light flash' % path)

    def disk_light(self, switch, devs):
        """Switch the locate LED for the given RAID devices."""
        self.raid = self.raid or RAID(self.config)
        self.__disk_light_check(devs)
        self.raid.raid_light(switch, devs)

    def pool_gc_sqlite(self, pool):
        """Drop all database records that belong to *pool*."""
        return Sqlite().pool_del(pool=pool)

    def pool_gc(self, pools):
        """Remove lich disk files whose pool is not in *pools*.

        Also wipes the shm mapping cache and drops the database records of
        every pool that lost at least one disk.

        :param pools: pool names to keep
        """
        stale_pools = []

        _dwarn('remove disk files not in pools: %s' % pools)

        for disk, dev, pool in self.__get_lich_disk():
            # skip meta/wlog entries, unpooled disks, and disks in kept pools
            if disk in ('meta', 'wlog') or not pool or pool in pools:
                continue

            if pool not in stale_pools:
                stale_pools.append(pool)

            self.lichDisk.unlink(disk)

        os.system('rm -rf /dev/shm/lich4/maping/*')

        for pool in stale_pools:
            _dwarn('remove pool db data: %s' % pool)
            self.pool_gc_sqlite(pool)


if __name__ == '__main__':
    # Ad-hoc smoke test: exercise bcache metadata reads and disk numbering
    # against the live host configuration (requires /dev/bcache0 to exist).
    from config import Config
    config = Config()

    bdev = BcacheDev(config)
    # read_meta / read_meta2 are defined elsewhere in this file
    print bdev.read_meta('/dev/bcache0', match_cluster_and_node=True)
    print bdev.read_meta('/dev/bcache0', match_cluster_and_node=False)

    print bdev.read_meta2('/dev/bcache0')

    lichDisk = LichDisk(config)
    # called twice to show the allocator advances on consecutive calls
    print lichDisk.get_next_disk_num()
    print lichDisk.get_next_disk_num()
