#!/usr/bin/env python2
#coding=utf-8
import os
import errno
import re
import ConfigParser
import json
import socket
import time
import mmap
import shutil
import sys

from disk_tool import DiskTool
from raid.raid import RAID
from disk.disk import DISK
from cache_manage import CommonCache
from pool_manage import PoolManage
from utils import Exp, _dmsg, _dwarn, _derror, _human_readable, _human_unreadable, _exec_pipe1, _exec_system, \
        _str2dict, _syserror, _syswarn, _lock_file1, _unlock_file1, _file_read
from bcache import Bcache

# Upper bound on tier indices handed out by the tiering helpers.
DISK_MAX_TIER = 1
# Highest disk number a node may use; disk files are named "<num>.disk" etc.
DISK_MAX_NUM = 255
# Offset of the lich ownership record inside a device:
# struct ext4_super_block offset = 1024; ext4_super_block.s_reserved offset = 588;
VALID_DATA_OFFSET = 1024 + 588
# sizeof(ext4_super_block.s_reserved) = 109
VALID_DATA_LEN = 109 + 1024
# Pseudo-pool keys used in disk listings; never names of real storage pools.
LICH_SPECIAL_POOL = ["__sysdev__", "__metadev__", "__wlogdev__", "__newdev__"]


class DiskManage(object):
    def __init__(self, node):
        """Disk manager for one node.

        node -- project node object; its ``config`` attribute supplies paths
        and cluster identity used throughout this class.
        """
        self.node = node
        self.config = node.config
        self.disktool = DiskTool()
        self.raid = RAID(self.disktool)
        self.commoncache = CommonCache(node)
        self.disk = DISK(self.config, self.disktool, self.raid, self.commoncache)

        # <home>/data/disk/disk holds the "<num>.disk" symlinks to devices.
        self.disk_home = os.path.join(self.config.home, 'data/disk/disk')
        #self.cacheconf = self.parse_cacheconf(self.config.cacheconf)
        self.cacheconf = None  # left unparsed; see parse_cacheconf()

        self.tier_withtype = False
        # Per-disk metadata files live here: "<num>.tier" / ".speed" / ".rotation".
        self.tier_path = os.path.join(self.config.home, 'data/disk/tier')
        self.speed_path = os.path.join(self.config.home, 'data/disk/speed')
        self.rotation_path = os.path.join(self.config.home, 'data/disk/rotation')
        # Lazy cache of __disk_load_search() results; None until first use.
        self.disk_search = None
        self.pool_manage = PoolManage(self.config)

    def ssd_models(self):
        models = []
        path = os.path.join(self.config.home, 'etc/ssd.models')
        if not os.path.exists(path):
            return []

        with file(path, 'r') as fd:
            for line in fd.readlines():
                line = line.strip()
                if not len(line) or line.startswith('#'):
                    continue
                models.append(line)

        return models

    def parse_cacheconf(self, cacheconf, devs=None):
        """Expand the raw cache config dict into a per-device cache policy map.

        cacheconf keys may be 'ssd'/'hdd' (class-wide defaults), 'diskN'
        (a lich disk name) or 'sysdev' (applied to every system device).
        RAID devices whose member model matches <home>/etc/ssd.models get
        raid cache disabled and disk cache enabled.
        devs is accepted but unused -- kept for caller compatibility.
        Returns {} in testing mode.
        """
        cache = {}

        if (self.config.testing):
            return cache

        lich_disk = self.__get_lich_disk()
        sys_dev = self.disktool.get_sys_dev()
        for dev in cacheconf:
            if dev == 'ssd' or dev == 'hdd':
                cache[dev] = cacheconf[dev]
            elif dev.startswith('disk'):
                # NOTE(review): __get_lich_disk() returns {pool: {diskname: dev}},
                # so iterating it yields pool-name strings and item[0]/item[1]
                # index into those strings -- this branch looks broken; confirm
                # the intended data shape before relying on it.
                for item in lich_disk:
                    if item[0] == dev:
                        cache[item[1]] = cacheconf[dev]
            elif dev == 'sysdev':
                for item in sys_dev:
                    cache[item] = cacheconf[dev]

        models = self.ssd_models()
        for pool in lich_disk:
            for disk in lich_disk[pool]:
                dev = lich_disk[pool][disk]
                if disk.startswith('disk'):
                    dev_type = 'UNKNOW'
                    try:
                        dev_type = self.disktool.get_dev_type(dev)
                    except:
                        # best effort: skip devices whose type cannot be probed
                        continue
                    if dev_type != 'RAID':
                        continue

                    dev = self.disktool.get_dev(dev)
                    model = self.raid.disk_model(dev)
                    if any(m in model for m in models):
                        # NOTE(review): raises KeyError when cacheconf carries
                        # no 'ssd' entry -- confirm callers always provide one.
                        if dev not in cache and not cache['ssd']['skip']:
                            cache[dev] = {}
                            cache[dev]['raid_cache'] = 'disable'
                            cache[dev]['disk_cache'] = 'enable'

        return cache

    def __get_disk_num(self):
        """Return the lowest free disk number in [0, DISK_MAX_NUM].

        A number counts as used when a matching "<n>.disk", "<n>.bitmap" or
        "<n>.info" file exists.  Raises Exp(EPERM) when every number is taken.
        """
        # Hoist the directory listings out of the scan loop: the original
        # re-listed each directory on every iteration (O(n^2) stat traffic).
        used = set()
        for subdir, suffix in (('data/disk/disk', '.disk'),
                               ('data/disk/bitmap', '.bitmap'),
                               ('data/disk/info', '.info')):
            path = os.path.join(self.config.home, subdir)
            if os.path.exists(path):
                for name in os.listdir(path):
                    if name.endswith(suffix):
                        used.add(name[:-len(suffix)])

        disk_num = 0
        while disk_num <= DISK_MAX_NUM:
            if str(disk_num) not in used:
                break
            disk_num += 1

        if disk_num > DISK_MAX_NUM:
            raise Exp(errno.EPERM, 'There is no number left')
        return disk_num

    def __get_disk_tiermask(self, disk_speed):
        mask = 0
        while disk_speed:
            disk_speed /= 10
            mask += 1

        return mask

    def __get_disk_tiermask_withspeed(self, lich_speed):
        mask = 0
        lich_mask = {}
        lich_mask_new = {}

        sort_speed = sorted(lich_speed.items(), key=lambda d: d[1])
        for i in range(len(sort_speed)):
            if mask not in lich_mask:
                lich_mask[mask] = []
            lich_mask[mask].append(sort_speed[i][0])
            if i == len(sort_speed) - 1:
                break
            if sort_speed[i + 1][1] > 2 * sort_speed[i][1]:
                mask += 1

        for i in lich_mask:
            for j in lich_mask[i]:
                lich_mask_new[j] = i

        return lich_mask_new

    def __get_disk_tier_withmask(self, lich_tier):
        max_tier = 0
        for disk in lich_tier:
            if lich_tier[disk] > max_tier:
                max_tier = lich_tier[disk]

        for disk in lich_tier:
            lich_tier[disk] = max_tier - lich_tier[disk]

        return lich_tier

    def __get_disk_tiermask_withrotation(self, lich_tier, lich_rotation):
        rotation_arr = []
        tier = 0
        for i in lich_rotation:
            if lich_rotation[i] not in rotation_arr:
                rotation_arr.append(lich_rotation[i])
        rotation_arr.sort()
        for i in lich_tier:
            tier = rotation_arr.index(lich_rotation[i])
            lich_tier[i] = tier

        return lich_tier

    def __get_lich_info(self, path):
        lich_info = {}
        if os.path.exists(path):
            for info in os.listdir(path):
                disk_num = int(info.split('.')[0])
                fp = open(os.path.join(path, info))
                value = fp.read().strip('\n').strip('\0')
                fp.close()
                try:
                    lich_info[disk_num] = int(value)
                except:
                    pass

        return lich_info

    def __get_pool_info(self, path, pool):
        pool_info = {}
        lich_info = self.__get_lich_info(path);
        pool_disk = self.get_pool_disk(pool)
        for i in pool_disk:
            if i in lich_info:
                pool_info[i] = lich_info[i]

        return pool_info

    def __get_lich_tier(self):
        # Tier value for every disk on this node: {disk_num: tier}.
        return self.__get_lich_info(self.tier_path)

    def __get_pool_tier(self, pool):
        # Tier values restricted to the disks of *pool*.
        return self.__get_pool_info(self.tier_path, pool)

    def __get_lich_writeback_fromdisk(self):
        lich_writeback = {}
        clusteruuid = self.config.getclusteruuid()
        hostname = self.config.hostname
        lich_link = self.__get_lich_link()
        for i in lich_link:
            dev = lich_link[i]
            lich_writeback[i] = {}

            valid = self.disk.disk_load_valid(dev, clusteruuid, hostname)
            if valid:
                lich_writeback[i]['type'] = valid['type']
                lich_writeback[i]['cache'] = valid['cache']
                lich_writeback[i]['cached'] = valid['cached']
            else:
                lich_writeback[i]['type'] = 'UNKNOW'
                lich_writeback[i]['cache'] = 0
                lich_writeback[i]['cached'] = 0


        return lich_writeback

    def __get_lich_writeback_fromdata(self):
        lich_writeback = {}

        block_path = os.path.join(self.config.home, 'data/disk/block')
        clusteruuid = self.config.getclusteruuid()
        hostname = self.config.hostname

        if not os.path.exists(block_path):
            return lich_writeback

        for disk in os.listdir(block_path):
            i = int(disk.split('.')[0])
            disk_path = os.path.join(block_path, disk)
            buff = _file_read(disk_path, VALID_DATA_OFFSET, VALID_DATA_LEN)

            lich_writeback[i] = {}
            m = re.match('cluster=%s;node=%s;type=(\w+);disk=%s;cache=(\d+);cached=(\d+);' %
                    (clusteruuid, hostname, i), buff)
            if m:
                lich_writeback[i]['type'] = m.group(1)
                lich_writeback[i]['cache'] = int(m.group(2))
                lich_writeback[i]['cached'] = int(m.group(3))
            else:
                lich_writeback[i]['type'] = 'UNKNOW'
                lich_writeback[i]['cache'] = 0
                lich_writeback[i]['cached'] = 0

        return lich_writeback

    def __get_lich_writeback(self):
        # Canonical writeback source is the live on-disk probe, not the
        # data/disk/block records.
        return self.__get_lich_writeback_fromdisk()

    def __check_lich_speed(self):
        """Measure and persist the speed of every plain data disk.

        Writes a "<num>.speed" file for linked data disks that lack one,
        and removes speed files for disks that no longer exist.
        """
        lich_link = self.__get_lich_link()
        lich_writeback = self.__get_lich_writeback_fromdisk()

        for i in lich_link:
            # Only plain data disks are benchmarked; fully-cached disks
            # (cache == 100) are skipped.
            if lich_writeback[i]['type'] != 'data' or lich_writeback[i]['cache'] == 100:
                continue

            path = os.path.join(self.speed_path, str(i) + '.speed')
            if not os.path.exists(path):
                print "get %s speed start..." % lich_link[i]
                disk_speed = self.disk.get_disk_speed(lich_link[i])
                print "get %s speed %d" %(lich_link[i], disk_speed)
                _exec_system("echo %d > %s" %(disk_speed, path), True)

        # Drop stale speed records for disks that were removed.
        for info in os.listdir(self.speed_path):
            disk_num = int(info.split('.')[0])
            if disk_num not in lich_link:
                _exec_system("rm -rf " + os.path.join(self.speed_path, info), True)

    def __get_lich_speed(self):
        # Measured speed for every disk on this node: {disk_num: speed}.
        return self.__get_lich_info(self.speed_path)

    def __get_pool_speed(self, pool):
        # Measured speeds restricted to the disks of *pool*.
        return self.__get_pool_info(self.speed_path, pool)

    def __check_lich_rotation(self):
        """Record the rotation rate of every plain data disk.

        Writes a "<num>.rotation" file for linked data disks that lack one,
        and removes rotation files for disks that no longer exist.
        """
        lich_link = self.__get_lich_link()
        lich_writeback = self.__get_lich_writeback_fromdisk()

        for i in lich_link:
            # Only plain data disks; fully-cached disks (cache == 100) skip.
            if lich_writeback[i]['type'] != 'data' or lich_writeback[i]['cache'] == 100:
                continue

            path = os.path.join(self.rotation_path, str(i) + '.rotation')
            if not os.path.exists(path):
                # NOTE(review): __get_disk_rotation is not defined in this
                # chunk -- presumably a helper elsewhere in the file; confirm.
                disk_rotation = self.__get_disk_rotation(lich_link[i])
                if disk_rotation is not None:
                    _exec_system("echo %d > %s" %(disk_rotation, path), True)

        # Drop stale rotation records for disks that were removed.
        for info in os.listdir(self.rotation_path):
            disk_num = int(info.split('.')[0])
            if disk_num not in lich_link:
                _exec_system("rm -rf " + os.path.join(self.rotation_path, info), True)

    def __get_lich_rotation(self):
        # Rotation rate for every disk on this node: {disk_num: rpm}.
        return self.__get_lich_info(self.rotation_path)

    def __get_pool_rotation(self, pool):
        # Rotation rates restricted to the disks of *pool*.
        return self.__get_pool_info(self.rotation_path, pool)

    def __get_disk_stat(self, lich_disk):
        disk_stat = {}
        diskstat = os.path.join(self.config.shm, 'nodectl/diskstat')

        if os.path.exists(diskstat):
            for pool in lich_disk:
                for disk in lich_disk[pool]:
                    if not disk.startswith('disk'):
                        continue

                    disk_num = disk[4:]
                    stat_file = os.path.join(diskstat, "%s/%s.stat" % (pool, disk_num))
                    if not os.path.exists(stat_file):
                        continue

                    o = open(stat_file)
                    res = o.read()
                    o.close()
                    d = _str2dict(res)
                    disk_stat['disk' + disk_num] = d

        return disk_stat

    def __get_disk_pool(self, dev):
        # Pool name owning *dev*, or None when unknown (delegates to DISK).
        return self.disk.get_disk_pool(dev)

    def __get_all_pool(self):
        all_pool = []
        if self.disk_search is None:
            self.disk_search = self.__disk_load_search()

        for disk in self.disk_search:
            if disk[0].startswith('disk'):
                if disk[2] not in all_pool:
                    all_pool.append(disk[2])

        return all_pool

    def __get_lich_metadisk(self):
        lich_disk = {}

        meta_path = os.path.join(self.config.home, 'data');
        meta_disk = self.disktool.get_dev_bymounted(meta_path)
        if meta_disk:
            lich_disk['__metadev__'] = meta_disk

        wlog_path = os.path.join(meta_path, 'wlog');
        wlog_disk = self.disktool.get_dev_bymounted(wlog_path)
        if wlog_disk:
            lich_disk['__wlogdev__'] = wlog_disk

        return lich_disk

    def __get_lich_disk(self):
        lich_disk = {}

        if os.path.exists(self.disk_home):
            for disk in os.listdir(self.disk_home):
                disk_num = disk.split('.')[0]
                disk_path = os.path.join(self.disk_home, disk)
                if os.path.islink(disk_path):
                    disk_dev = os.path.realpath(os.path.join(self.disk_home, os.readlink(disk_path)))
                    pool = self.__get_disk_pool(disk_dev)

                    if pool is None:
                        pool = '__unknow__'
                    if pool not in lich_disk:
                        lich_disk[pool] = {}
                    lich_disk[pool]['disk' + disk_num] = disk_dev

                    '''
                    if need_convert == True:
                        if self.config.cache_enable and disk_dev[5:].startswith(self.config.cache_type):
                            disk_dev = self.commoncache.get_coredev_by_fastdev(disk_dev, self.config.cache_type)
                            cacheid, cachedev, cache_status, cache_mode = self.commoncache.get_cacheinfo_by_coredev(disk_dev, self.config.cache_type)
                    '''

        return lich_disk

    def get_lich_disk(self):
        # Public wrapper: {pool: {'disk<num>': device_path}}.
        return self.__get_lich_disk()

    def get_pool_disk(self, pool):
        pool_disk = []
        lich_disk = self.__get_lich_disk()
        for disk in lich_disk:
            if disk[0].startswith('disk') and disk[2] == pool:
                pool_disk.append(int(disk[0][4:]))

        return pool_disk

    def __get_lich_dev(self):
        lich_dev = []
        lich_disk = [dev for (x, dev, pool) in self.__get_lich_disk() if dev]
        for disk in lich_disk:
            dev = self.disktool.get_dev(disk)
            if dev not in lich_dev:
                lich_dev.append(dev)

        return lich_dev

    def __get_lich_link(self):
        lich_link = {}
        if os.path.exists(self.disk_home):
            for disk in os.listdir(self.disk_home):
                disk_num = disk.split('.')[0]
                disk_path = os.path.join(self.disk_home, disk)
                if os.path.islink(disk_path):
                    disk_dev = os.readlink(disk_path)
                    if not self.disktool.is_dev(disk_dev):
                         continue
                    lich_link[int(disk_num)] = disk_dev
        return lich_link

    def get_pool_link(self, pool):
        pool_link = {}
        lich_link = self.__get_lich_link();
        pool_disk = self.get_pool_disk(pool)
        for i in pool_disk:
            if i in lich_link:
                if self.config.cache_enable and self.config.cache_type in lich_link[i]:
                    lich_link[i] = self.commoncache.get_coredev_by_fastdev(lich_link[i], self.config.cache_type)
                pool_link[i] = lich_link[i]

        return pool_link

    def __disk_set_metawlog(self):
        clusteruuid = self.config.getclusteruuid()
        hostname = self.config.hostname

        meta_path = os.path.join(self.config.home, 'data');
        if os.path.ismount(meta_path):
            dev = self.disktool.get_dev_bymounted(meta_path)
            self.disk.disk_write(dev, 'cluster=%s;node=%s;type=meta;' % (clusteruuid, hostname), VALID_DATA_OFFSET)
            self.disk.set_disk_label(dev, 'lich-meta')

        wlog_path = os.path.join(meta_path, 'wlog')
        if os.path.ismount(wlog_path):
            dev = self.disktool.get_dev_bymounted(wlog_path)
            self.disk.disk_write(dev, 'cluster=%s;node=%s;type=wlog;' % (clusteruuid, hostname), VALID_DATA_OFFSET)
            self.disk.set_disk_label(dev, 'lich-wlog')

    def __disk_set_clusteruuid(self, lich_disk):
        clusteruuid = self.config.getclusteruuid()
        for pool in lich_disk:
            for disk in lich_disk[pool]:
                dev = lich_disk[pool][disk]
                self.disk.set_disk_clusteruuid(dev, clusteruuid)

    def __disk_set_nodename(self, lich_disk):
        hostname = self.config.hostname
        for pool in lich_disk:
            for disk in lich_disk[pool]:
                dev = lich_disk[pool][disk]
                self.disk.set_disk_nodename(dev, clusteruuid)

    def disk_set(self, arg):
        lich_disk = self.__get_lich_disk()
        if arg == 'clusteruuid':
            self.__disk_set_clusteruuid(lich_disk)
        elif arg == 'nodename':
            self.__disk_set_nodename(lich_disk)
        elif arg == 'all':
            self.__disk_set_clusteruuid(lich_disk)
            self.__disk_set_nodename(lich_disk)
        elif arg == 'metawlog':
            self.__disk_set_metawlog()
        else:
            raise Exp(errno.EINVAL, '%s is invalid argument, use --help for help' % arg)

    def __disk_load_valid(self, dev):
        """Validate *dev* as a lich-owned disk and classify it.

        Returns ('disk<num>', dev, pool) for a valid data disk,
        (type, dev, None) for other recognized types (meta/wlog/...), or
        None when the device does not belong to this cluster/node or fails
        validation.
        """
        clusteruuid = self.config.getclusteruuid()
        hostname = self.config.hostname
        # The hostname may still be 'N/A' while the config settles after
        # boot; retry for ~10s before giving up.
        retry = 0
        while hostname == 'N/A':
            if retry > 10:
                _syserror(" disk_manage, get hostname fail, please check lich.conf or ifconfig")
                raise Exp(errno.EINVAL, 'get hostname fail, please check lich.conf or ifconfig')

            self.config.refresh()
            hostname = self.config.hostname
            time.sleep(1)
            retry += 1

        # Cache (fast) devices must first pass their own validity check.
        if self.config.cache_enable and self.config.cache_type in dev:
            valid = self.commoncache.is_valid_cachedev(dev, self.config.cache_type)
            if not valid:
                # TODO when dump json
                # _dwarn("%s not a valid cache device, pelase check it!" % dev)
                return None

        bitmap_path = os.path.join(self.config.home, 'data/disk/bitmap')
        info_path = os.path.join(self.config.home, 'data/disk/info')

        load_valid = self.disk.disk_load_valid(dev, clusteruuid, hostname)
        if load_valid is None:
            return None

        if load_valid['type'] == 'data':
                # A data disk also needs its bitmap and info companions, and
                # the info record must match the device.
                disk_num = load_valid['disk']
                pool = load_valid['pool']
                disk_bitmap = os.path.join(bitmap_path, disk_num + '.bitmap')
                disk_info = os.path.join(info_path, disk_num + '.info')
                if not os.path.exists(disk_bitmap) or not os.path.exists(disk_info):
                    return None

                buff = _file_read(disk_info)
                valid = self.disk.disk_info_valid(dev, buff)

                # NOTE(review): falls through to an implicit None when the
                # info record does not validate -- confirm that is intended.
                if valid:
                    return ('disk' + disk_num, dev, pool)
        else:
            return (load_valid['type'], dev, None)

    def __disk_load_search(self):
        lich_disk = []
        all_devs = self.disk.get_all_disk()
        for dev in all_devs:
            match = self.__disk_load_valid(dev)
            if match:
                lich_disk.append(match)

        return lich_disk

    def __disk_load_check(self, items):
        lich_type = []
        lich_disk = []
        for item in items:
            if not item[1]:
                raise Exp(errno.EINVAL, '%s not specify' % item[0])
            if item[0] == 'meta' or item[0] == 'wlog':
                if item[0] in lich_type:
                    raise Exp(errno.EINVAL, '%s disk repeat' % item[0])
                lich_type.append(item[0])
                if item[1] in lich_disk:
                    raise Exp(errno.EINVAL, '%s repeat used' % item[1])
                lich_disk.append(item[1])
            elif item[0].startswith('disk'):
                m = re.match('disk(\d+)', item[0])
                if m is not None:
                    if int(m.group(1)) > DISK_MAX_NUM:
                        raise Exp(errno.EINVAL, '%s disk number too big' % item[0])
                else:
                    raise Exp(errno.EINVAL, '%s not support' % item[0])
                if item[0] in lich_type:
                    raise Exp(errno.EINVAL, '%s repeat' % item[0])
                lich_type.append(item[0])
                if item[1] in lich_disk:
                    raise Exp(errno.EINVAL, '%s repeat used' % item[1])
                lich_disk.append(item[1])
            elif item[0] == 'new':
                continue
            else:
                raise Exp(errno.EINVAL, '%s=%s config not support' % (item[0], item[1]))

    def __disk_load_cleanup(self, items, lich_items):
        """Remove recorded disks that are absent from the configured items.

        items: configured (name, dev, ...) tuples.
        lich_items: current {pool: {diskname: device}} state.
        For each recorded disk not present in *items* (and not a ram/spdk
        pseudo device), the ".disk" link and info record are deleted.
        """
        info_path = os.path.join(self.config.home, 'data/disk/info')
        disk_path = os.path.join(self.config.home, 'data/disk/disk')
        for pool in lich_items:
            for disk in lich_items[pool]:
                dev = lich_items[pool][disk]

                # Keep disks that are still listed in the configuration.
                found = False
                for i in items:
                    if i[0] == disk:
                        found = True
                        break
                if (found):
                    continue

                # e.g. "disk12" -> "12"
                disk_num = re.match('(\D+)(\d+)', disk).group(2)
                disk_info = os.path.join(info_path, disk_num + ".info")
                if not os.path.exists(disk_info):
                    continue

                '''
                sample:
                    ramdisk : ram_p1_disk0_8589934592_21313131
                    spdk : pci_p1_disk0_0000.05.00.0
                '''
                realpath = dev
                if realpath.startswith('ram') or realpath.startswith('pci'):
                    continue

                # NOTE(review): this rebinding shadows the disk_path computed
                # above from data/disk/disk -- confirm both always point at
                # the same directory (disk_home).
                disk_path = os.path.join(self.disk_home, disk_num + ".disk")
                _syswarn(" disk_manage, %s(%s) not found in system" % (disk, dev))
                _dwarn("lich disk %s(%s) not found in system" % (disk, dev))
                _exec_system("rm -rf %s" % disk_path)
                self.__disk_del_info(disk_num)

    def __disk_load_metawlog(self, items):
        meta_path = os.path.join(self.config.home, 'data')
        wlog_path = os.path.join(meta_path, 'wlog')
        if not os.path.exists(meta_path):
            os.mkdir(meta_path)
        if not os.path.exists(wlog_path):
            os.mkdir(wlog_path)

        for item in items:
            if item[0] == 'meta':
                dev = self.disktool.get_dev_bymounted(meta_path)
                if dev == item[1]:
                    continue
                elif dev is not None:
                    _dwarn("%s mounted by %s not %s!"%(meta_path, dev, item[1]))
                    continue
                try:
                    self.disktool.dev_mount(item[1], meta_path)
                except Exp, e:
                    _dwarn(e.err)

            if item[0] == 'wlog':
                dev = self.disktool.get_dev_bymounted(wlog_path)
                if dev == item[1]:
                    continue
                elif dev is not None:
                    _dwarn("%s mounted by %s not %s!"%(wlog_path, dev, item[1]))
                    continue
                try:
                    self.disktool.dev_mount(item[1], wlog_path)
                except Exp, e:
                    _dwarn(e.err)

    def __disk_load_disk(self, items, lich_items):
        """Re-add disks whose device name changed since the last load.

        items: configured (name, dev, pool) tuples.
        lich_items: current {pool: {diskname: device}} state.
        Disks present in the config but not matching their recorded device
        are torn down and re-linked; a reset is triggered when anything
        changed.
        """
        info_path = os.path.join(self.config.home, 'data/disk/info')
        disk_path = os.path.join(self.config.home, 'data/disk/disk')

        need_reset = False
        for item in items:
            if item[0] == 'new':
                _dwarn('disk %s is new' % item[1])
            elif item[0].startswith('disk'):
                # Does the recorded state already match this (name, dev) pair?
                right = False
                for pool in lich_items:
                    for disk in lich_items[pool]:
                        dev = lich_items[pool][disk]
                        if disk == item[0] and dev.endswith(item[1]):
                            right = True
                            break

                if right:
                    continue

                need_reset = True
                # e.g. "disk12" -> "12"
                disk_num = re.match('(\D+)(\d+)', item[0]).group(2)
                lich_disk = str(disk_num) + ".disk"
                disk_path = os.path.join(self.disk_home, lich_disk)
                dev = item[1]
                pool = item[2]

                _syswarn(" disk_manage, %s(%s) not found in lich" % (item[0], item[1]))
                _dwarn(" lich disk %s(%s) not found in lich" % (item[0], item[1]))
                # Drop the stale link and metadata before re-adding.
                _exec_system("rm -rf %s" % disk_path)
                self.__disk_del_info(disk_num)

                # Tier assignment is best effort; skip this disk on failure.
                try:
                    self.__disk_add_tier(pool, dev, disk_num)
                except:
                    _derror("%s add tier failed." % dev)
                    continue

                # Link failure is fatal: re-raise after logging.
                try:
                    self.__disk_add_link(dev, disk_num, pool)
                except:
                    _derror("%s add link failed." % dev)
                    raise

        if (need_reset):
            self.__disk_add_reset()

    def __disk_load_nvme(self, items, lich_items):
        for pool in lich_items:
            for disk in lich_items[pool]:
                dev = lich_items[pool][disk]
                dev_type = self.disktool.get_dev_type(dev)
                if self.config.spdk and dev_type == 'NVMe':
                    self.__disk_del_link(int(disk[4:]))
                    pci = self.__nvme2spdk(dev)
                    try:
                        self.__disk_add_link(pci, int(disk[4:]), pool)
                    except:
                        _derror("%s add link failed." % dev)
                        raise
                elif not self.config.spdk and dev_type == 'SPDK':
                    self.__disk_del_link(int(disk[4:]))
                    self.__disk_del_spdk(dev, disk_num, pool)
                    pci = self.__spdk2nvme(dev.split('_')[-1])
                    try:
                        self.__disk_add_link(pci, int(disk[4:]), pool)
                    except:
                        _derror("%s add link failed." % dev)
                        raise

    def __disk_load_loding(self, items):
        if not os.path.exists(self.disk_home):
            os.makedirs(self.disk_home)

        lich_items = self.__get_lich_disk()

        '''
        #lich disk not found in system
        '''
        self.__disk_load_cleanup(items, lich_items)

        '''
        #mount meta & wlog disk
        '''
        self.__disk_load_metawlog(items)

        '''
        #diskname changed, add disk again
        '''
        self.__disk_load_disk(items, lich_items)

        '''
        #nvme type changed, reset disk link
        '''
        self.__disk_load_nvme(items, lich_items)


    def __disk_load_auto(self):
        if self.disk_search is None:
            self.disk_search = self.__disk_load_search()
        self.__disk_load_check(self.disk_search)
        self.__disk_load_loding(self.disk_search)

    def __disk_load_conf(self, conf):
        """Load disks described by the [disk] section of config file *conf*.

        Raises Exp(EINVAL) when the file does not exist.
        """
        if not os.path.exists(conf):
            raise Exp(errno.EINVAL, '%s not exists' % conf)

        parser = ConfigParser.ConfigParser()
        parser.read(conf)
        disk_items = parser.items("disk")

        self.__disk_load_check(disk_items)
        self.__disk_load_loding(disk_items)

    def __disk_load_args(self, args):
        for arg in args:
            if '=' not in arg:
                raise Exp(errno.EINVAL, '%s is invalid argument, use --help for help' % arg)
            if not arg.startswith('meta=') and \
                not arg.startswith('wlog=') and \
                not arg.startswith('disk'):
                    raise Exp(errno.EINVAL, '%s is invalid argument, use --help for help' % arg)

        items = []
        for arg in args:
            items.append(tuple(arg.split('=')))

        self.__disk_load_check(items)
        self.__disk_load_loding(items)

    def disk_load(self, args=None):
        if (self.config.testing):
            return
        if not args or len(args) == 0:
            return self.__disk_load_auto()
        elif len(args) == 1 and args[0].startswith('conf='):
            return self.__disk_load_conf(args[0].split('=')[-1])
        else:
            return self.__disk_load_args(args)

    def __disk_check_env(self):
        if not os.path.exists(self.disk_home):
            os.makedirs(self.disk_home)
        if not os.path.exists(self.tier_path):
            os.makedirs(self.tier_path)
        if not os.path.exists(self.speed_path):
            os.makedirs(self.speed_path)
        if not os.path.exists(self.rotation_path):
            os.makedirs(self.rotation_path)

        try:
            pool_list = self.pool_manage.pool_list()
        except:
            return

        all_pool = self.__get_all_pool()
        for p in all_pool:
            if p not in pool_list:
                self.pool_manage.pool_cleanup(p)

    def __disk_list_sysdev(self, all_disk_json, used_disk):
        sys_dev = self.disktool.get_sys_dev()

        all_disk_json['__sysdev__'] = {}
        used_disk['__sysdev__'] = {}
        for dev in sys_dev:
            disk_info = self.disk.get_disk_info(dev)
            all_disk_json['__sysdev__'][dev] = disk_info
            used_disk['__sysdev__'][dev] = self.disk.get_disk_real(dev)

    def __disk_list_lichdev(self, all_disk_json, used_disk):
        lich_items = self.__get_lich_metadisk()
        for item in lich_items:
            all_disk_json[item] = self.disk.get_disk_info(lich_items[item])
            used_disk[item] = self.disk.get_disk_real(lich_items[item])

        lich_items = self.__get_lich_disk()
        lich_stat = self.__get_disk_stat(lich_items)
        lich_tier = self.__get_lich_tier()

        for item in lich_items:
            all_disk_json[item] = {}
            used_disk[item] = {}
            for disk in lich_items[item]:
                dev = lich_items[item][disk]
                disk_info = self.disk.get_disk_info(dev)

                if disk in lich_stat:
                    disk_info['disk_stat'] = lich_stat[disk]
                disk_info['used'] = disk
                disk_num = int(disk[4:])
                if disk_num in lich_tier:
                    disk_info['tier'] = str(lich_tier[disk_num])
                else:
                    disk_info['tier'] = 'None'

                all_disk_json[item][dev] = disk_info
                used_disk[item][dev] = self.disk.get_disk_real(dev)

    def __disk_list_newdev(self, all_disk_json, used_disk):
        disks = self.disk.get_new_disk(used_disk)
        if len(disks) == 0:
            return

        all_disk_json['__newdev__'] = {}
        for dev in disks:
            all_disk_json['__newdev__'][dev] = self.disk.get_new_info(dev)

    def __disk_list_getall(self, is_all=False):
        '''
        {
            "__sysdev__": {"/dev/sda":{}}

            "__metadev__": {"/dev/sdb":{}}
            "__wlogdev__": {"/dev/sdc":{}}

            "pool-cache": {"/dev/sde":{}, "/dev/sdf":{}}
            "pool-hdd": {"/dev/sdg":{}, "/dev/sdh":{}}

            "__newdev__": {"/dev/sdi":{}}
        }
        '''
        all_disk_json = {}
        used_disk = {}

        self.__disk_list_sysdev(all_disk_json, used_disk)

        self.__disk_list_lichdev(all_disk_json, used_disk)

        self.__disk_list_newdev(all_disk_json, used_disk)

        return all_disk_json

    def __disk_list_getusable(self, all_disk, force):
        """Filter a flat {dev: info} map down to devices safe to add.

        Most safety checks (mounted, partitioned, raid level, cache state)
        are bypassed when *force* is set; 'disk' mode devices are always
        excluded.
        """
        usable = []
        for dev in all_disk:
            info = all_disk[dev]
            if info['flag'] != 'new':
                continue
            if info['mode'] == 'disk':
                continue
            if self.disktool.dev_check_mounted(dev) and not force:
                continue
            if 'part_info' in info and not force:
                continue
            if len(self.disktool.get_dev_parts(dev)) and not force:
                continue
            if info['dev_info']['type'] == 'RAID':
                # only raid-0 volumes are considered usable without --force
                if 'raid_info' in info:
                    if info['raid_info']['raid'] != '0' and not force:
                        continue
            elif info['dev_info']['cache'] is None and not force:
                continue
            usable.append(dev)
        return usable

    def __disk_show_sysdev(self, all_disk):
        # Print the system (root) devices under the fixed __sysdev__ header.
        print "__sysdev__:"
        for dev in all_disk:
            print self.disk.show_disk_info(dev, all_disk[dev])

    def __disk_show_useddev(self, pool, all_disk):
        print pool + ":"
        for dev in all_disk:
            used_info = "  " + all_disk[dev]['used']
            if 'disk_stat' in all_disk[dev]:
                free = (int(all_disk[dev]['disk_stat']['total']) -
                        int(all_disk[dev]['disk_stat']['used'])) * (1024*1024)
                used_info += "  free:" + _human_readable(free)
            used_info += "  tire:" + all_disk[dev]['tier']
            print  used_info  + " " + self.disk.show_disk_info(dev, all_disk[dev])

    def __disk_show_newdev(self, all_disk):
        # Print the not-yet-claimed devices under the fixed __newdev__ header.
        print "__newdev__:"
        for dev in all_disk:
            print self.disk.show_new_disk(dev, all_disk[dev])

    def __disk_list_showall(self, all_disk, is_all):
        """Pretty-print the inventory: sysdev (only with is_all), pools, then new disks."""
        pools = {}
        for name in all_disk:
            if name not in LICH_SPECIAL_POOL:
                pools[name] = all_disk[name]

        if '__sysdev__' in all_disk and is_all:
            self.__disk_show_sysdev(all_disk['__sysdev__'])
        # __metadev__ / __wlogdev__ sections are deliberately not displayed
        if '__metadev__' in all_disk:
            pass
        if '__wlogdev__' in all_disk:
            pass

        for name in pools:
            self.__disk_show_useddev(name, all_disk[name])

        if '__newdev__' in all_disk:
            self.__disk_show_newdev(all_disk['__newdev__'])

    def __disk_list_cachedev(self, all_disk, cachedev):
        coredevs = {}
        for pool in all_disk:
            for dev in all_disk[pool]:
                if all_disk[pool][dev]['type'] == 'Bcache':
                    coredevs[all_disk[pool][dev]['coredev']] = all_disk[pool][dev]['coredev_info']

        for dev in coredevs:
            print self.disk.show_disk_info(dev, coredevs[dev])

    def list_all_used_cache_disk(self):
        """Return core + cache devices of every cache-backed disk in regular pools.

        NOTE(review): coredev is appended without a duplicate check while
        cachedev is deduplicated -- confirm whether that asymmetry is intended.
        """
        all_disk = self.__disk_list_getall()
        used = []

        for pool in all_disk:
            if pool in LICH_SPECIAL_POOL:
                continue
            for disk in all_disk[pool]:
                if self.config.cache_type not in disk:
                    continue
                info = all_disk[pool][disk]
                used.append(info['coredev'])
                if info['cachedev'] not in used:
                    used.append(info['cachedev'])

        return used

    def disk_list(self, is_all, cachedev, is_json, verbose):
        self.__disk_check_env()
        all_disk = self.__disk_list_getall(is_all)
        if is_json:
            print json.dumps(all_disk)
        elif verbose:
            print json.dumps(all_disk, sort_keys=False, indent=4)
        else:
            if self.config.cache_enable and cachedev is not None:
                self.__disk_list_cachedev(all_disk, cachedev)
            else:
                self.__disk_list_showall(all_disk, is_all)

    def disk_list_with_return_json_value(self):
        """Return the full disk inventory serialized as a JSON string."""
        self.__disk_check_env()
        return json.dumps(self.__disk_list_getall())

    def disk_speed(self, devs):
        # Run a verbose speed probe on each given device.
        for dev in devs:
            self.disk.get_disk_speed(dev, True)

    def __nvme2spdk(self, dev):
        # Unbind the NVMe device from the kernel driver so SPDK can claim it;
        # returns the device's PCI address as reported by disktool.
        pci = self.disktool.get_dev_pci(dev)
        '''
        echo '0000:05:00.0' > /sys/bus/pci/drivers/nvme/unbind
        '''
        _exec_system("echo '%s' > /sys/bus/pci/drivers/nvme/unbind" % pci.replace('.', ':', 2))
        return pci

    def __spdk2nvme(self, pci):
        """Rebind a PCI device from SPDK back to the kernel nvme driver.

        echo '0000:05:00.0' > /sys/bus/pci/drivers/nvme/bind

        Polls up to ~10 seconds for the block device to reappear and returns
        its path; raises Exp(EINVAL) if it never shows up.
        """
        _exec_system("echo '%s' > /sys/bus/pci/drivers/nvme/bind" % pci.replace('.', ':', 2))

        dev = None
        for _ in range(10):
            time.sleep(1)
            dev = self.disk.get_dev_bypci(pci.replace('.', ':', 2))
            if dev is not None:
                break

        if dev is None:
            raise Exp(errno.EINVAL, '%s spdk to nvme fail' %pci)

        return dev

    def __disk_add_check(self, devs, force, cache, pool):
        """Validate devices before adding them to lich; raises Exp on any problem.

        May rewrite entries of *devs* in place (NVMe <-> SPDK identifier
        rebinding), so callers see the adjusted list.

        :param devs: list of device paths (or nvme/spdk identifiers)
        :param force: bypass most safety checks when True
        :param cache: cache weight; non-zero requires an SSD unless forced
        :param pool: target pool; 'default' is auto-created if missing
        :raises Exp: on any failed validation
        """
        # lichd must be up before registering disks
        instance = self.node.instences[0]
        if not instance.running():
            raise Exp(errno.EINVAL, 'lichd not running')

        self.__disk_check_env()
        lich_disk = self.__get_lich_disk()
        sys_dev = self.disktool.get_sys_dev(False)
        sys_dev_part = self.disktool.get_sys_dev(True)
        # devices validated so far; used to reject duplicates in one call
        add_disk = []
        for dev in devs:
            if self.disktool.dev_check_mounted(dev) and not force:
                raise Exp(errno.EINVAL, '%s or partition was mounted, please use --force' % dev)

            # /dev paths must be block devices; anything else must be an NVMe id
            if dev.startswith('/dev/'):
                if not self.disktool.is_block(dev):
                    raise Exp(errno.EINVAL, '%s is not block device' % dev)
            elif not self.disktool.is_nvme(dev):
                raise Exp(errno.EINVAL, '%s is not block device' % dev)
            if dev in [disk[1] for disk in lich_disk]:
                raise Exp(errno.EINVAL, '%s already used by lich' % dev)
            if dev in sys_dev or dev in sys_dev_part:
                raise Exp(errno.EINVAL, '%s mounted on /' % dev)
            if self.disktool.is_mounted(dev) is not None and not force:
                raise Exp(errno.EINVAL, '%s mounted, please use --force' % dev)

            if self.disktool.is_part(dev) and not force:
                raise Exp(errno.EINVAL, 'dev %s is a partition' %(dev))

            dev_parts = self.disktool.get_dev_parts(dev)
            if len(dev_parts) and not force:
                raise Exp(errno.EINVAL, 'dev %s has partitions %s' %(dev, dev_parts))

            dev_info = self.disktool.get_dev_info(dev)
            dev_type = dev_info['type']
            if dev_type == 'ISCSI':
                raise Exp(errno.EINVAL, 'can not add disk %s type(%s) to lich' % (dev, dev_type))
            elif dev_type == 'UNKNOW' and not force:
                raise Exp(errno.EINVAL, 'can not add disk %s type(%s) to lich' % (dev, dev_type))
            elif dev_type == 'RAID':
                # only whitelisted raid cards are supported without --force
                if not force and \
                        not dev_info['raidcard'].startswith('LSI') and \
                        not dev_info['raidcard'].startswith('HewlettPackard'):
                    raise Exp(errno.EINVAL, 'not support %s raidcard: %s' %(dev, dev_info['raidcard']))

                dev_info = self.raid.raid_info(dev)
                if dev_info is None:
                    # NOTE(review): silently stops validating the remaining
                    # devs; confirm whether this should raise instead
                    return
                disk_info = self.raid.disk_info(dev)
                disk_type = disk_info['media_type']
                if cache != 0 and disk_type != 'SSD' and not force:
                    raise Exp(errno.EINVAL, 'disk %s type %s can not be set to cache' %(dev, disk_type))

                if self.cacheconf is None:
                    self.cacheconf = self.parse_cacheconf(self.config.cacheconf, devs)
                # SSD raid volumes always get a strict check; others honor --force
                if disk_type == 'SSD':
                    self.raid.raid_check([self.disktool.get_dev(dev)], self.cacheconf, True)
                else:
                    self.raid.raid_check([self.disktool.get_dev(dev)], self.cacheconf, force)
            elif dev_type == 'NVMe':
                # spdk mode: unbind from the kernel and track the PCI id instead
                if self.config.spdk:
                    pci = self.__nvme2spdk(dev)
                    idx = devs.index(dev)
                    devs.pop(idx)
                    devs.insert(idx, pci)
            elif dev_type == 'SPDK':
                # kernel mode: rebind to nvme and track the block device instead
                if not self.config.spdk:
                    pci = self.__spdk2nvme(dev)
                    if pci is None:
                        raise Exp(errno.EINVAL, 'disk %s add fail, retry again' %(dev))
                    idx = devs.index(dev)
                    devs.pop(idx)
                    devs.insert(idx, pci)
            else:
                # plain HDD/SSD: on-disk write cache must be in a known state
                if not force and \
                        dev_info['cache'] != 'Enabled' and \
                        dev_info['cache'] != 'Disabled':
                    raise Exp(errno.EINVAL, 'not support %s cache: %s' %(self.disktool.get_dev(dev), dev_info['cache']))
                if cache != 0 and dev_type != 'SSD' and not force:
                    raise Exp(errno.EINVAL, 'disk %s type %s can not be set to cache' %(dev, dev_type))

                self.__disk_check_cache({dev_type:[self.disktool.get_dev(dev)]}, True)
            if dev in add_disk:
                raise Exp(errno.EINVAL, '%s repeat' % dev)
            else:
                add_disk.append(dev)

        try:
            self.__check_lich_speed()
            self.__check_lich_rotation()
        except:
            # speed/rotation metadata problems are only fatal without --force
            if not force:
                raise

        pool_list = self.pool_manage.pool_list()
        if pool != "default" and pool not in pool_list:
            raise Exp(errno.EPERM, "pool %s not found" % pool)
        elif pool == "default" and pool not in pool_list:
            self.pool_manage.pool_create(pool)

    def __disk_tier_adjust(self, pool, disk_num = None, disk_speed = None, p=False):
        lich_tier = self.__get_lich_tier()
        lich_speed = self.__get_lich_speed()
        lich_mask = {}
        idx_tier = -1
        cur_tier = -1

        for disk in lich_tier:
            if disk in lich_speed:
                lich_mask[disk] = self.__get_disk_tiermask(lich_speed[disk])

        if disk_num is not None and disk_speed is not None:
            lich_mask[disk_num] = self.__get_disk_tiermask(disk_speed)
        lich_mask = self.__get_disk_tier(lich_mask)

        sort_tier = sorted(lich_mask.items(), key=lambda d: d[1])
        for disk in sort_tier:
            tier = disk[1]
            if tier != cur_tier:
                cur_tier = tier
                if idx_tier != DISK_MAX_TIER:
                    idx_tier += 1
            lich_tier[disk[0]] = idx_tier

        if p:
            print "tier:", sorted(lich_tier.items(), key=lambda d: d[1])
        return lich_tier

    def __disk_tier_adjust_new(self, pool, disk_num = None, disk_speed = None, disk_rotation = None, p=False):
        lich_link = self.get_pool_link(pool)
        lich_tier = self.__get_pool_tier(pool)
        lich_speed = self.__get_pool_speed(pool)
        lich_rotation = self.__get_pool_rotation(pool)
        lich_mask = {}
        idx_tier = -1
        cur_tier = -1

        if disk_num is not None and disk_speed is not None:
            lich_speed[disk_num] = disk_speed
            lich_link[disk_num] = None
        if disk_num is not None and disk_rotation is not None:
            lich_rotation[disk_num] = disk_rotation
            lich_link[disk_num] = None

        lich_mask = self.__get_disk_tiermask_withspeed(lich_speed)
        #if lich_mask.keys() == lich_link.keys() and \
        #        lich_rotation.keys() == lich_link.keys():
        #    lich_mask = self.__get_disk_tiermask_withrotation(lich_mask, lich_rotation)
        lich_mask = self.__get_disk_tier_withmask(lich_mask)

        sort_tier = sorted(lich_mask.items(), key=lambda d: d[1])
        for disk in sort_tier:
            tier = disk[1]
            if tier != cur_tier:
                cur_tier = tier
                if idx_tier != DISK_MAX_TIER:
                    idx_tier += 1
            lich_tier[disk[0]] = idx_tier

        if p:
            print "tier:", sorted(lich_tier.items(), key=lambda d: d[1])
        return lich_tier


    def __disk_tier_update(self, lich_tier):
        # Persist each disk's tier to <tier_path>/<num>.tier.
        for disk, tier in lich_tier.items():
            tier_file = os.path.join(self.tier_path, str(disk) + ".tier")
            _exec_system("echo %d > %s" %(tier, tier_file), False)

    def __disk_add_speed(self, disk_num, disk_speed):
        # Record the measured speed in <speed_path>/<num>.speed.
        speed_file = os.path.join(self.speed_path, str(disk_num) + ".speed")
        _exec_system("echo %d >  %s" % (disk_speed, speed_file))

    def __disk_add_rotation(self, disk_num, disk_rotation):
        # Rotation is optional; skip when the disk reports none.
        if disk_rotation is None:
            return
        rotation_file = os.path.join(self.rotation_path, str(disk_num) + ".rotation")
        _exec_system("echo %d >  %s" % (disk_rotation, rotation_file))

    def __disk_add_tier_withspeed(self, pool, disk_num, disk_speed, disk_rotation):
        """Recompute pool tiers with the new disk included, then persist
        its tier, speed and rotation records."""
        tiers = self.__disk_tier_adjust_new(pool, disk_num, disk_speed, disk_rotation, True)
        self.__disk_tier_update(tiers)
        self.__disk_add_speed(disk_num, disk_speed)
        self.__disk_add_rotation(disk_num, disk_rotation)

    def __disk_add_tier_withtype(self, disk_num, disk_type):
        lich_tier = 0 if disk_type == 'SSD' else 1
        print "add disk %s tier %s" %(disk_num, lich_tier)
        self.__disk_tier_update({disk_num:lich_tier})

    def __disk_add_link(self, dev, disk_num, pool):
        '''ln -s /dev/sda /opt/fusionstack/data/disk/0.disk'''
        link_path = os.path.join(self.disk_home, str(disk_num) + ".disk")
        target_disk = self.disk.get_disk_target(dev, disk_num, pool)
        _exec_system("ln -s " + target_disk + " " + link_path)

    def __disk_add_tier(self, pool, dev, disk_num):
        if self.tier_withtype:
            dev_type = self.disktool.get_dev_type(dev)
            if dev_type == 'RAID':
                disk_info = self.raid.disk_info(dev)
                disk_type = disk_info['media_type']
            else:
                disk_type = dev_type
            return self.__disk_add_tier_withtype(disk_num, disk_type)
        else:
            ''' get disk speed must before add link, because get speed need write test '''
            print "get %s speed start.." % dev
            disk_speed = self.disk.get_disk_speed(dev)
            print "get %s speed %d" %(dev, disk_speed)
            disk_rotation = self.disk.get_disk_rotation(dev)
            return self.__disk_add_tier_withspeed(pool, disk_num, disk_speed, disk_rotation)

    def __disk_add_superblock(self, dev, disk_num, pool, cache):
        # Stamp the lich superblock (cluster uuid + hostname) onto the device.
        self.disk.disk_add_superblock(dev, disk_num, pool, cache,
                                      self.config.getclusteruuid(),
                                      self.config.hostname)

    def __disk_add_disk(self, devs, force, cache, pool):
        for dev in devs:
            if self.disktool.is_mounted(dev) is not None and force:
                self.disktool.dev_umount(dev)
            elif self.disktool.is_mounted(dev):
                raise Exp(errno.EINVAL, '%s mounted, please use --force' % dev)

            new = True
            match = self.__disk_load_valid(dev)
            if match:
                if match[0].startswith('disk'):
                    _dwarn("disk %s used by lich %s, please restart lichd, "
                            "or cleanup disk use `dd if=/dev/zero of=%s bs=1M count=1'" % (dev, match[0], dev))
                    continue
                else:
                    _dwarn('disk %s used by lich %s, please restart lichd' % (dev, match[0]))
                    continue

            if new:
                disk_num = self.__get_disk_num()

            if cache != 100:
                try:
                    self.__disk_add_tier(pool, self.disktool.get_dev(dev), disk_num)
                except Exception, e:
                    raise
                    if not force:
                        _derror("%s add tier failed:%s" % (dev, e))
                        continue

            if new:
                self.__disk_add_superblock(dev, disk_num, pool, cache)

            try:
                self.__disk_add_link(dev, disk_num, pool)
            except Exception, e:
                _derror("%s add link failed:%s" % (dev, e))
                raise

    def __disk_add_wait(self):
        """Return True while any linked disk still lacks its bitmap or info file."""
        data_root = os.path.join(self.config.home, 'data/disk')
        disk_path = os.path.join(data_root, 'disk')
        bitmap_path = os.path.join(data_root, 'bitmap')
        info_path = os.path.join(data_root, 'info')

        for entry in os.listdir(disk_path):
            num = entry.split('.')[0]
            if not os.path.exists(os.path.join(bitmap_path, str(num) + '.bitmap')):
                return True
            if not os.path.exists(os.path.join(info_path, str(num) + '.info')):
                return True

        return False

    def __disk_add_reset(self):
        # Wait until every new disk has its bitmap/info files, then bounce
        # lichd and clear the shared-memory hsm state so it re-reads layout.
        while self.__disk_add_wait():
            time.sleep(1)

        instance = self.node.instences[0]
        instance.stop()
        _exec_system("rm -rf %s/hsm" % self.config.shm)
        instance.start()

    def disk_add(self, devs, v, force, cache, pool):
        """Add devices to lich; 'all' expands to every usable new disk.

        Serialized by /var/run/add_disk.lock; if the pool's tier layout
        changed as a result, lichd is restarted to pick it up.
        """
        if len(devs) == 1 and devs[0] == 'all':
            devs = self.__disk_list_getusable(self.__disk_list_getall(), force)
            if devs:
                _dmsg("add disk %s" % devs)
            else:
                _dmsg("no usable disk found!")

        if not devs:
            return

        if pool is None:
            pool = 'default'

        self.__disk_add_check(devs, force, cache, pool)

        fd = _lock_file1("/var/run/add_disk.lock")
        old_tier = self.__get_pool_tier(pool)
        self.__disk_add_disk(devs, force, cache, pool)
        new_tier = self.__get_pool_tier(pool)
        _unlock_file1(fd)

        for tier in old_tier:
            if new_tier[tier] != old_tier[tier]:
                self.__disk_add_reset()
                break

    def __disk_del_check(self, devs):
        """Validate that each dev is a lich data disk and appears only once."""
        lich_disk = self.__get_lich_disk()
        used_devs = [entry[1] for entry in lich_disk]
        data_devs = [entry[1] for entry in lich_disk if entry[0].startswith('disk')]

        seen = []
        for dev in devs:
            if dev not in used_devs:
                raise Exp(errno.EINVAL, '%s not used by lich' % dev)
            elif dev not in data_devs:
                raise Exp(errno.EINVAL, 'only support delete data disk')
            if dev in seen:
                raise Exp(errno.EINVAL, '%s repeat' % dev)
            seen.append(dev)

    def __disk_del_superblock(self, dev):
        # Wipe the lich superblock from the device so it reads as new again.
        self.disk.disk_del_superblock(dev)

    def __disk_del_wait(self, dev, disk_num):
        """Block until the disk's .info file disappears, printing a countdown.

        Gives up after 10 hours and raises Exp(EPERM).
        """
        info_file = os.path.join(self.config.home, 'data/disk/info', disk_num + '.info')
        timeout = 10*60*60
        elapsed = 0
        while elapsed < timeout:
            if not os.path.exists(info_file):
                return
            sys.stdout.write(str(timeout - int(elapsed)) + "\r")
            sys.stdout.flush()
            elapsed += 1
            time.sleep(1)
        raise Exp(errno.EPERM, "delete disk %s failed" % dev)

    def __disk_del_info(self, disk_num, p = True):
        """Remove a disk's tier, speed and rotation metadata files.

        Bug fix: the error messages previously formatted the undefined name
        `dev` (NameError on any rm failure); they now report disk_num.

        :param disk_num: lich disk number (string) whose metadata is removed
        :param p: forwarded to _exec_system to control command echoing
        """
        tier_path = os.path.join(self.tier_path, disk_num + '.tier')
        if os.path.exists(tier_path):
            ret = _exec_system("rm -rf " + tier_path, p)
            if ret:
                _derror("delete %s tier failed." % disk_num)

        speed_path = os.path.join(self.speed_path, disk_num + '.speed')
        if os.path.exists(speed_path):
            ret = _exec_system("rm -rf " + speed_path, p)
            if ret:
                _derror("delete %s speed failed." % disk_num)

        rotation_path = os.path.join(self.rotation_path, disk_num + '.rotation')
        if os.path.exists(rotation_path):
            ret = _exec_system("rm -rf " + rotation_path, p)
            if ret:
                _derror("delete %s rotation failed." % disk_num)

    def __disk_del_link(self, disk_num):
        '''rm -rf /opt/fusionstack/data/disk/0.disk'''
        link_path = os.path.join(self.disk_home, str(disk_num) + ".disk")
        _exec_system("rm -rf " + link_path)

    def __disk_del_spdk(self, dev, disk_num, pool):
        # Remove the target path that represented the SPDK-backed disk.
        # Bug fix: previously removed the undefined name `target` (NameError
        # on every call); the intended variable is target_disk.
        target_disk = self.disk.get_disk_target(dev, disk_num, pool)
        _exec_system("rm -rf " + target_disk)

    def __disk_del_disk(self, devs):
        """Drop each device from the running node, delete its metadata and superblock.

        :param devs: devices already validated by __disk_del_check
        :raises Exp: if a device's lich disk number cannot be determined
        """
        lich_disk = self.__get_lich_disk()
        for dev in devs:
            disk_num = None
            for lich_dev in lich_disk:
                if dev == lich_dev[1]:
                    # lich_dev[0] looks like 'disk7'; extract the number
                    m = re.match(r'(\D+)(\d+)', lich_dev[0])
                    if m is not None:
                        disk_num = m.group(2)

            # Robustness fix: previously int(None) raised a bare TypeError
            # when the number could not be parsed; fail with a clear error.
            if disk_num is None:
                raise Exp(errno.EINVAL, 'can not find disk number for %s' % dev)

            self.node.node_drop(int(disk_num))

            self.__disk_del_info(disk_num, False)

            self.__disk_del_superblock(dev)

    def __disk_del_dev_disk(self, devs):
        """Remove the <num>.disk symlinks for devs without touching superblocks."""
        lich_disk = self.__get_lich_disk(False)
        # i.e. [('disk0', '/dev/sdb', 'p1')]
        for dev in devs:
            for entry in lich_disk:
                if dev != entry[1]:
                    continue
                m = re.match(r'(\D+)(\d+)', entry[0])
                if m is None:
                    continue
                num = m.group(2)
                link_path = os.path.join(self.disk_home, num + '.disk')
                if os.path.exists(link_path):
                    if _exec_system("rm -rf " + link_path, False):
                        _derror("delete %s tier failed." % dev)

    def disk_del(self, devs, v):
        """Delete data disks from lich.

        :param devs: list of device paths currently used as lich data disks
        :param v: verbosity flag (currently unused here)
        """
        #self.__disk_del_dev_disk(devs)
        self.__disk_del_check(devs)
        self.__disk_del_disk(devs)

    def __disk_check_cache(self, devs, force=False, setcache=True):
        """Check (and optionally set) on-disk write cache against cacheconf.

        :param devs: {dev_type: [dev, ...]} mapping to inspect
        :param force: tolerate unknown device types and force cache setting
        :param setcache: when True apply the configured state; when False
                         only collect mismatches
        :returns: {dev: {'disk_cache': wanted}} for mismatched devices
                  (empty when setcache is True or cacheconf is empty)
        """
        if self.cacheconf is None:
            self.cacheconf = self.parse_cacheconf(self.config.cacheconf)
        if len(self.cacheconf) == 0:
            return {}

        cache_stat = {}
        for dev_type in devs:
            for dev in devs[dev_type]:
                # a per-device cacheconf entry wins over the per-type entry
                key = dev if dev in self.cacheconf else dev_type.lower()

                if key == 'unknow':
                    if force:
                        continue
                    raise Exp(errno.EINVAL, 'not support %s type: %s' %(dev, key))

                if key in ('nvme', 'bcache'):
                    continue
                conf = self.cacheconf[key]
                if 'skip' in conf and conf['skip']:
                    continue
                if 'disk_cache' in conf:
                    disk_cache = conf['disk_cache']
                else:
                    disk_cache = 'disable'

                dev_cache = self.disktool.get_dev_cache(dev)
                if dev_cache is None or dev_cache.lower() == disk_cache:
                    continue
                if setcache:
                    _dmsg("set %s cache to %s" % (dev, disk_cache))
                    self.disktool.set_dev_cache(dev, disk_cache, force)
                else:
                    cache_stat[dev] = {'disk_cache': disk_cache}

        return cache_stat

    def __disk_check_split(self):
        """Partition lich-used and system devices into RAID vs plain HDD/SSD.

        Deduplicated refactor: the identical classification logic previously
        appeared twice (lich disks and system disks); it now lives in one
        nested helper.

        :returns: (raid_devs, hdd_devs) where raid_devs is a list of
                  RAID-backed devices and hdd_devs is {'HDD': [...], 'SSD': [...]}.
                  Both are empty in testing mode; HBA devices are skipped.
        """
        raid_devs = []
        hdd_devs = {'HDD': [], 'SSD': []}
        if (self.config.testing):
            return raid_devs, hdd_devs

        def classify(dev):
            # Sort one device into raid_devs / hdd_devs, skipping HBA and dups.
            if self.disktool.is_hba(dev):
                return
            dev_type = self.disktool.get_dev_type(dev)
            if dev_type == 'RAID' and dev not in raid_devs:
                raid_devs.append(dev)
            elif dev_type in ('HDD', 'SSD') and dev not in hdd_devs[dev_type]:
                hdd_devs[dev_type].append(dev)

        lich_disk = self.__get_lich_disk()
        for pool in lich_disk:
            for disk in lich_disk[pool]:
                dev = lich_disk[pool][disk]
                if not dev:
                    continue
                dev = self.disktool.get_dev(dev)
                if dev is None:
                    continue
                classify(dev)

        for dev in self.disktool.get_sys_dev():
            classify(dev)

        return raid_devs, hdd_devs

    def __disk_check_health(self):
        raid_info = self.raid.raid_ldpdinfo()
        disk_info = self.disk_list_with_return_json_value()
        all_disks = json.loads(disk_info)

        used_disk = {}
        for disk in all_disks:
            if all_disks[disk]['flag'] == 'lich':
                used_disk[disk] = all_disks[disk]

        for disk in used_disk:
            if 'raid_info' in used_disk[disk].keys():
                if used_disk[disk]['raid_info']['adp_type'] == 'LSI':
                    (adpid, dev_vd) = mega_raid.get_dev_vd(disk)
                    cmd = 'smartctl -d megaraid,%s %s -A' % (raid_info['LSI'][adpid][dev_vd], disk)
                    cmd = cmd.split()
                    try:
                        (out_msg, err_msg) = _exec_pipe1(cmd, 0, False)
                    except Exp, e:
                        _syswarn(" smartctl, %s" % err_msg)
                        continue

                else:
                    _syswarn(" 3007  Don't support raid type: %s(%s)" % (used_disk[disk]['raid_info']['adp_type'], disk))
                    continue

            else:
                cmd = 'smartctl -i %s -A' % disk
                cmd = cmd.split()
                try:
                    (out_msg, err_msg) = _exec_pipe1(cmd, 0, False)
                except Exp, e:
                    _syswarn(" smartctl, %s" % err_msg)
                    continue

            num = out_msg.count('\n')
            lines = out_msg.splitlines(num)
            for line in lines:
                if 'Reallocated_Sector_Ct' in line:
                    if int(line.split()[-1]) > 0:
                        _syserror(' 3006  %s:%s' % (disk.split('/')[-1], line))

    def disk_check(self, arg):
        if self.cacheconf is None:
            self.cacheconf = self.parse_cacheconf(self.config.cacheconf)
        if arg == 'cache':
            (raid_devs, hdd_devs)  = self.__disk_check_split()

            if len(raid_devs) != 0:
                self.raid.raid_check(raid_devs, self.cacheconf, True)
            if len(hdd_devs) != 0:
                self.__disk_check_cache(hdd_devs, True)
        elif arg == 'cacheset':
            (raid_devs, hdd_devs)  = self.__disk_check_split()

            if len(raid_devs) != 0:
                self.raid.raid_check(raid_devs, self.cacheconf, True)
            if len(hdd_devs) != 0:
                self.__disk_check_cache(hdd_devs)
        elif arg == 'cachestat':
            cachestat = {}
            (raid_devs, hdd_devs)  = self.__disk_check_split()

            if len(raid_devs) != 0:
                stat = self.raid.raid_check(raid_devs, self.cacheconf, True, False)
                cachestat.update(stat)
            if len(hdd_devs) != 0:
                stat = self.__disk_check_cache(hdd_devs, True, False)
                cachestat.update(stat)

            return cachestat
        elif arg == 'tier':
            pool = self.pool_manage.pool_list()
            for p in pool:
                lich_tier = self.__get_pool_tier(p)
                if self.tier_withtype:
                    print p, ":", lich_tier
                else:
                    tier_adjust = self.__disk_tier_adjust_new(p)
                    if lich_tier == tier_adjust:
                        print p, ":", lich_tier
                    else:
                        print p, "tier:", lich_tier, "should:", tier_adjust
        elif arg == 'writeback':
            print self.__get_lich_writeback()
        elif arg == 'speed':
            pool = self.pool_manage.pool_list()
            for p in pool:
                print p, ":", self.__get_pool_speed(p)
        elif arg == 'rotation':
            pool = self.pool_manage.pool_list()
            for p in pool:
                print p, ":", self.__get_pool_rotation(p)
        elif arg == 'health':
            self.__disk_check_health()
        else:
            raise Exp(errno.EINVAL, '%s is invalid argument, use --help for help' % arg)

    def raid_add(self, devs, force):
        """Create RAID volumes; 'all' expands to every unconfigured raid disk.

        :returns: list of block devices that newly appeared after the adds
        """
        if (len(devs) == 1 and devs[0] == 'all'):
            devs = []
            new_raid_disk = self.raid.disk_list()
            for raid in new_raid_disk:
                for adp, disks in new_raid_disk[raid].iteritems():
                    for disk in disks:
                        devs.append(disks[disk]['inq'])

            # (a non-empty disk_list always yields entries, so the former
            # nested length check was redundant)
            if new_raid_disk:
                _dmsg("add raid %s" % devs)
            else:
                _dmsg("no valid disk found!")

        new_disk = []
        known_devs = self.disktool.get_all_devs()
        for dev in devs:
            self.raid.raid_add(dev, force)
            # diff the device list before/after to find what appeared
            current_devs = self.disktool.get_all_devs()
            for disk in current_devs:
                if disk not in known_devs:
                    new_disk.append(disk)
            known_devs = current_devs

        return new_disk

    def __raid_del_check(self, devs, force):
        """Refuse to delete non-RAID, lich-used, system, or mounted devices."""
        lich_devs = [entry[1] for entry in self.__get_lich_disk()]
        sys_dev = self.disktool.get_sys_dev()
        for dev in devs:
            if not self.disktool.is_dev(dev):
                raise Exp(errno.EINVAL, "%s is not block device" % dev)
            if self.disktool.get_dev_type(dev) != 'RAID':
                raise Exp(errno.EPERM, 'can not del disk %s, maybe not raid disk' % dev)
            if dev in lich_devs:
                raise Exp(errno.EINVAL, '%s used by lich' % dev)
            if dev in sys_dev:
                raise Exp(errno.EINVAL, "can not delete system device")
            if self.disktool.is_mounted(dev) is not None and not force:
                raise Exp(errno.EINVAL, '%s mounted, please use --force' % dev)

    def __raid_del_disk(self, devs, force):
        # Umount first when forced; otherwise a mounted device is an error.
        for dev in devs:
            mounted = self.disktool.is_mounted(dev) is not None
            if mounted and force:
                self.disktool.dev_umount(dev, True)
            elif mounted:
                raise Exp(errno.EINVAL, '%s mounted, please use --force' % dev)
            self.raid.raid_del(dev, force)

    def raid_del(self, devs, force):
        """Delete RAID volumes after validating they are safe to remove."""
        self.__raid_del_check(devs, force)
        self.__raid_del_disk(devs, force)

    def raid_load(self):
        # Import foreign raid configurations via the RAID helper.
        self.raid.raid_import()

    def raid_flush(self):
        # Flush raid controller state via the RAID helper.
        self.raid.raid_flush()

    def __raid_cache_check(self, devs):
        # Raid cache policy can only be applied to RAID-backed devices.
        for dev in devs:
            if self.disktool.get_dev_type(dev) != 'RAID':
                raise Exp(errno.EPERM, 'can not set disk %s raid cache' % dev)

    def raid_cache(self, switch, devs, policy):
        """Switch the raid cache mode (or apply a policy) on devices.

        :param switch: cache state to apply, forwarded to RAID.raid_cache
        :param devs: target devices; empty means every RAID device on the node
        :param policy: policy mode; skips the RAID-type pre-check when set
        """
        if len(devs) == 0:
            (raid_devs, hdd_devs)  = self.__disk_check_split()
            sys_dev = self.disktool.get_sys_dev()
            for dev in sys_dev:
                dev_type = self.disktool.get_dev_type(dev)
                if dev_type == 'RAID' and dev not in raid_devs:
                    raid_devs.append(dev)
            devs = raid_devs

        # Consistency fix: every other cacheconf consumer lazily parses it
        # first; without this, raid_cache could pass cacheconf=None to RAID.
        if self.cacheconf is None:
            self.cacheconf = self.parse_cacheconf(self.config.cacheconf)

        if not policy:
            self.__raid_cache_check(devs)
        self.raid.raid_cache(switch, devs, self.cacheconf, policy)

    def raid_miss(self):
        # Handle missing raid members via the RAID helper.
        self.raid.raid_miss()

    def __disk_light_check(self, devs):
        # Locate-LED control only works for RAID-attached /dev devices.
        for dev in devs:
            if dev.startswith('/dev/') and self.disktool.get_dev_type(dev) != 'RAID':
                raise Exp(errno.EPERM, 'can not set disk %s light flash' % dev)

    def disk_light(self, switch, devs):
        """Turn the locate LED on/off for the given (RAID-attached) devices."""
        self.__disk_light_check(devs)
        self.raid.raid_light(switch, devs)
