#!/usr/bin/python
#coding=utf-8

import commands
import ConfigParser
import copy
import json
import logging
import logging.config
import os
import sys
import threading

import rados

import global_list
from command import *

# List of commonly tuned parameters
# Curated list of commonly tuned Ceph parameters, served page-by-page by
# ReqConfig.getcommonlist().  Each entry carries: 'id' (1-based position),
# 'name' (the Ceph option name), 'level', 'description' (zh_CN UI text --
# runtime data shown to users, not a comment), and 'default' (Ceph's
# shipped default value).
# NOTE(review): getcommonlist() writes a 'current' key into these dicts in
# place, so entries are mutated after the first request is served.
commonlist=[
{'id':1,'name':"filestore_queue_max_bytes",'level':0,'description':'数据盘一次操作的最大字节数','default':104857600},
{'id':2,'name':"filestore_op_threads",'level':0,'description':'并发文件系统操作数','default':2},
{'id':3,'name':"filestore_max_sync_interval",'level':0,'description':'从日志到数据盘最大同步间隔','default':5},
{'id':4,'name':"filestore_min_sync_interval",'level':0,'description':'从日志到数据盘最小同步间隔','default':0.01},
{'id':5,'name':"filestore_op_thread_timeout",'level':0,'description':'I/O线程超时告警时间','default':60},
{'id':6,'name':"filestore_op_thread_suicide_timeout",'level':0,'description':'I/O线程自杀时间','default':180},
{'id':7,'name':"filestore_fd_cache_size",'level':0,'description':'对象文件句柄缓存大小','default':128},
{'id':8,'name':"filestore_fd_cache_shards",'level':0,'description':'对象文件句柄缓存分片个数','default':16},
{'id':9,'name':"filestore_omap_header_cache_size",'level':0,'description':'扩展属性头缓存','default':1024},
{'id':10,'name':"filestore_merge_threshold",'level':0,'description':'PG子目录合并的最小文件数','default':10},
{'id':11,'name':"filestore_split_multiple",'level':0,'description':'PG子目录分裂乘数','default':2},
{'id':12,'name':"max_open_files",'level':0,'description':'在操作系统层面设置的最大打开文件描述符','default':0},
{'id':13,'name':"osd_pool_default_min_size",'level':0,'description':'存储池中的对象必须具有的最小副本数目','default':0},
{'id':14,'name':"objecter_inflight_ops",'level':0,'description':'允许的最大未发送I/O请求数','default':1024},
{'id':15,'name':"objecter_inflight_op_bytes",'level':0,'description':'允许的最大未发送脏数据','default':104857600},
{'id':16,'name':"journal_max_write_entries",'level':0,'description':'journal一次性写入的最大记录数','default':100},
{'id':17,'name':"journal_max_write_bytes",'level':0,'description':'journal一次性写入的最大字节数','default':10485760},
{'id':18,'name':"journal_dio",'level':0,'description':'启用径直I/O到日志，需要journal_block_align设置为true','default':'true'},
{'id':19,'name':"journal_aio",'level':0,'description':'异步写入日志时用libaio库，需要journal_dio设为true','default':'true'},
{'id':20,'name':"journal_force_aio",'level':0,'description':'强制使用异步写入日志','default':'false'},
{'id':21,'name':"journal_block_align",'level':0,'description':'块对齐写，dio和aio需要','default':'true'},
{'id':22,'name':"osd_journal_size",'level':0,'description':'OSD日志大小(MB)','default':5120},
{'id':23,'name':"osd_max_write_size",'level':0,'description':'OSD一次可写入的最大值(MB)','default':90},
{'id':24,'name':"osd_client_message_size_cap",'level':0,'description':'客户端允许在内存中的最大值(bytes)','default':524288000},
{'id':25,'name':"osd_client_message_cap",'level':0,'description':'客户端允许在内存中的messages的最大数量','default':100},
{'id':26,'name':"osd_deep_scrub_stride",'level':0,'description':'在deep-scrub时允许读取的字节数(bytes)','default':524288},
{'id':27,'name':"osd_map_dedup",'level':0,'description':'删除OSDMap中的重复项','default':'true'},
{'id':28,'name':"osd_op_threads",'level':0,'description':'OSD进程操作的线程数','default':2},
{'id':29,'name':"osd_disk_threads",'level':0,'description':'OSD密集型操作例如恢复和scrubbing时的线程','default':1},
{'id':30,'name':"osd_map_cache_size",'level':0,'description':'保留OSDMap的缓存(MB)','default':200},
{'id':31,'name':"osd_op_num_shards",'level':0,'description':'OSD::op_shardedwq中存储I/O的队列个数','default':5},
{'id':32,'name':"osd_op_num_threads_per_shard",'level':0,'description':'为OSD::op_shardedwq中每个队列分配的I/O分发线程数','default':2},
{'id':33,'name':"osd_snap_trim_sleep",'level':0,'description':'两个连续的snap裁剪之间的睡眠时间，单位是秒','default':0},
{'id':34,'name':"osd_scrub_sleep",'level':0,'description':'两个连续的scrub之间的睡眠时间，单位是秒','default':0},
{'id':35,'name':"osd_recovery_op_priority",'level':0,'description':'恢复操作的优先级。取值1-63，值越高占用资源越高','default':3},
{'id':36,'name':"osd_recovery_max_active",'level':0,'description':'同一时间内活跃的恢复请求数','default':3},
{'id':37,'name':"osd_recovery_max_chunk",'level':0,'description':'数据恢复块的最大值，单位是字节','default':8388608},
{'id':38,'name':"osd_recovery_threads",'level':0,'description':'恢复数据所需的线程数','default':1},
{'id':39,'name':"osd_recovery_sleep",'level':0,'description':'两个连续的recovery操作之间的睡眠时间，单位是秒','default':0},
{'id':40,'name':"osd_max_backfills",'level':0,'description':'一个OSD允许的最大backfills数','default':1},
{'id':41,'name':"osd_backfill_scan_min",'level':0,'description':'每个backfill扫描的最小object数','default':64},
{'id':42,'name':"osd_backfill_scan_max",'level':0,'description':'每个backfill扫描的最大object数','default':512},
{'id':43,'name':"mon_osd_down_out_interval",'level':0,'description':'指定Ceph在OSD守护进程的多少秒时间内没有响应后标记其为down或out状态','default':300},
{'id':44,'name':"mon_allow_pool_delete",'level':0,'description':'避免Ceph存储池的意外删除，请设置这个参数为false','default':'true'},
{'id':45,'name':"filestore_queue_max_ops",'level':0,'description':'数据盘最大接受的操作数','default':50}
]

# Index of the same common parameters keyed by option name, used by
# ReqConfig.getlistbyregex() for O(1) metadata lookups.
# NOTE(review): this hand-written table duplicates every entry of
# 'commonlist' above and the two must be kept in sync manually; consider
# deriving one structure from the other to remove the drift risk.
commondict={
"filestore_queue_max_bytes":{'id':1,'level':0,'description':'数据盘一次操作的最大字节数','default':104857600},
"filestore_op_threads":{'id':2,'level':0,'description':'并发文件系统操作数','default':2},
"filestore_max_sync_interval":{'id':3,'level':0,'description':'从日志到数据盘最大同步间隔','default':5},
"filestore_min_sync_interval":{'id':4,'level':0,'description':'从日志到数据盘最小同步间隔','default':0.01},
"filestore_op_thread_timeout":{'id':5,'level':0,'description':'I/O线程超时告警时间','default':60},
"filestore_op_thread_suicide_timeout":{'id':6,'level':0,'description':'I/O线程自杀时间','default':180},
"filestore_fd_cache_size":{'id':7,'level':0,'description':'对象文件句柄缓存大小','default':128},
"filestore_fd_cache_shards":{'id':8,'level':0,'description':'对象文件句柄缓存分片个数','default':16},
"filestore_omap_header_cache_size":{'id':9,'level':0,'description':'扩展属性头缓存','default':1024},
"filestore_merge_threshold":{'id':10,'level':0,'description':'PG子目录合并的最小文件数','default':10},
"filestore_split_multiple":{'id':11,'level':0,'description':'PG子目录分裂乘数','default':2},
"max_open_files":{'id':12,'level':0,'description':'在操作系统层面设置的最大打开文件描述符','default':0},
"osd_pool_default_min_size":{'id':13,'level':0,'description':'存储池中的对象必须具有的最小副本数目','default':0},
"objecter_inflight_ops":{'id':14,'level':0,'description':'允许的最大未发送I/O请求数','default':1024},
"objecter_inflight_op_bytes":{'id':15,'level':0,'description':'允许的最大未发送脏数据','default':104857600},
"journal_max_write_entries":{'id':16,'level':0,'description':'journal一次性写入的最大记录数','default':100},
"journal_max_write_bytes":{'id':17,'level':0,'description':'journal一次性写入的最大字节数','default':10485760},
"journal_dio":{'id':18,'level':0,'description':'启用径直I/O到日志，需要journal_block_align设置为true','default':'true'},
"journal_aio":{'id':19,'level':0,'description':'异步写入日志时用libaio库，需要journal_dio设为true','default':'true'},
"journal_force_aio":{'id':20,'level':0,'description':'强制使用异步写入日志','default':'false'},
"journal_block_align":{'id':21,'level':0,'description':'块对齐写，dio和aio需要','default':'true'},
"osd_journal_size":{'id':22,'level':0,'description':'OSD日志大小(MB)','default':5120},
"osd_max_write_size":{'id':23,'level':0,'description':'OSD一次可写入的最大值(MB)','default':90},
"osd_client_message_size_cap":{'id':24,'level':0,'description':'客户端允许在内存中的最大值(bytes)','default':524288000},
"osd_client_message_cap":{'id':25,'level':0,'description':'客户端允许在内存中的messages的最大数量','default':100},
"osd_deep_scrub_stride":{'id':26,'level':0,'description':'在deep-scrub时允许读取的字节数(bytes)','default':524288},
"osd_map_dedup":{'id':27,'level':0,'description':'删除OSDMap中的重复项','default':'true'},
"osd_op_threads":{'id':28,'level':0,'description':'OSD进程操作的线程数','default':2},
"osd_disk_threads":{'id':29,'level':0,'description':'OSD密集型操作例如恢复和scrubbing时的线程','default':1},
"osd_map_cache_size":{'id':30,'level':0,'description':'保留OSDMap的缓存(MB)','default':200},
"osd_op_num_shards":{'id':31,'level':0,'description':'OSD::op_shardedwq中存储I/O的队列个数','default':5},
"osd_op_num_threads_per_shard":{'id':32,'level':0,'description':'为OSD::op_shardedwq中每个队列分配的I/O分发线程数','default':2},
"osd_snap_trim_sleep":{'id':33,'level':0,'description':'两个连续的snap裁剪之间的睡眠时间，单位是秒','default':0},
"osd_scrub_sleep":{'id':34,'level':0,'description':'两个连续的scrub之间的睡眠时间，单位是秒','default':0},
"osd_recovery_op_priority":{'id':35,'level':0,'description':'恢复操作的优先级。取值1-63，值越高占用资源越高','default':3},
"osd_recovery_max_active":{'id':36,'level':0,'description':'同一时间内活跃的恢复请求数','default':3},
"osd_recovery_max_chunk":{'id':37,'level':0,'description':'数据恢复块的最大值，单位是字节','default':8388608},
"osd_recovery_threads":{'id':38,'level':0,'description':'恢复数据所需的线程数','default':1},
"osd_recovery_sleep":{'id':39,'level':0,'description':'两个连续的recovery操作之间的睡眠时间，单位是秒','default':0},
"osd_max_backfills":{'id':40,'level':0,'description':'一个OSD允许的最大backfills数','default':1},
"osd_backfill_scan_min":{'id':41,'level':0,'description':'每个backfill扫描的最小object数','default':64},
"osd_backfill_scan_max":{'id':42,'level':0,'description':'每个backfill扫描的最大object数','default':512},
"mon_osd_down_out_interval":{'id':43,'level':0,'description':'指定Ceph在OSD守护进程的多少秒时间内没有响应后标记其为down或out状态','default':300},
"mon_allow_pool_delete":{'id':44,'level':0,'description':'避免Ceph存储池的意外删除，请设置这个参数为false','default':'true'},
"filestore_queue_max_ops":{'id':45,'level':0,'description':'数据盘最大接受的操作数','default':50}
}

# Configuration request handler class
class ReqConfig:
    """Serves one configuration request against a Ceph cluster.

    Supported actions ('act'):
      - getcommonlist : page through the curated common-parameter list,
                        annotated with live values from a monitor.
      - getlistbyregex: substring search over the full live config dump.
      - setval2temp   : inject a value into all running MONs/OSDs (volatile).
      - setval2file   : setval2temp plus persisting the value to
                        /etc/ceph/ceph.conf on every host in the cluster.

    Progress and results are published into the shared RESULTS table
    (see global_list), keyed by request id, so another thread can poll them.
    """

    def __init__(self, handle, dict, reqid, logger):
        """Capture the request parameters.

        handle -- cluster handle, forwarded to CephClusterCommand
        dict   -- request parameters: keys act/page/pagesize/val/item
                  (the parameter name shadows the builtin 'dict'; kept
                  unchanged so existing keyword callers do not break)
        reqid  -- request id, any int-convertible value
        logger -- logger used for progress/error reporting
        """
        self.h = handle
        self.act = dict['act']
        self.page = dict['page']
        self.pagesize = dict['pagesize']
        self.val = dict['val']
        self.item = dict['item']
        self.reqid = int(reqid)
        self.logger = logger

    def _publish_result(self, resp):
        """Store a deep copy of *resp* in the shared RESULTS table under MUTEX.

        try/finally guarantees the mutex is released even if the shared-table
        access raises (the previous code could leave the lock held forever).
        """
        mtx = global_list.get_value('MUTEX')
        mtx.acquire()
        try:
            res = global_list.get_value('RESULTS')
            res[self.reqid] = copy.deepcopy(resp)
            global_list.set_value('RESULTS', res)
        finally:
            mtx.release()

    def ThreadDealer(self):
        """Thread entry point: execute self.act and publish progress/result."""
        resp = {'act': str(self.act), 'type': 'config', 'status': 'running'}
        if self.act in ('getcommonlist', 'getlistbyregex'):
            resp['page'] = self.page
        else:
            # setval2file / setval2temp responses carry the parameter name
            resp['item'] = self.item

        # announce that the request is being processed
        self._publish_result(resp)

        if self.act == 'getcommonlist' or self.act == 'getlistbyregex':
            if self.act == 'getcommonlist':
                ret = self.getcommonlist()
            else:
                ret = self.getlistbyregex()
            if 'err' not in ret:
                resp.update(ret)
                resp['status'] = 'success'
            else:
                resp['status'] = 'failed'
        elif self.act == 'setval2file' or self.act == 'setval2temp':
            if self.act == 'setval2file':
                ret = self.setval2file()
            else:
                ret = self.setval2temp()
            resp['status'] = 'failed' if ret == False else 'success'
        else:
            # unknown action: status stays 'running' (original behavior)
            pass

        # publish the final outcome
        self._publish_result(resp)

    def _get_mon_config(self):
        """Fetch the live config dump from the first monitor.

        Shared by getcommonlist() and getlistbyregex() (previously duplicated
        verbatim in both).  Returns (cfg, None) on success, where cfg is the
        parsed 'config show' dict, or (None, errmsg) on failure.
        """
        mon_find = CephClusterCommand(self.h, prefix='mon_status', format='json')
        if 'err' in mon_find:
            self.logger.error("reqid:" + str(self.reqid) + " get mon_status failed, error is " + mon_find['err'])
            return None, mon_find['err']

        # Robust lookup: the previous chained .get(..., 'None') calls raised
        # AttributeError on a missing 'monmap' instead of reporting an error.
        mons = mon_find['result'].get('monmap', {}).get('mons', [])
        if not mons:
            self.logger.error("reqid:" + str(self.reqid) + " mon_status contains no monitors")
            return None, 'no monitors found in mon_status'
        host = mons[0].get('name', 'None')
        self.logger.info("reqid:" + str(self.reqid) + " mon host is: " + str(host))

        # query the mon's admin socket over ssh for the live configuration
        cmd = ("/usr/bin/ssh " + str(host) +
               " /usr/bin/ceph daemon /var/run/ceph/ceph-mon." + str(host) +
               ".asok config show -f json")
        ret, res = commands.getstatusoutput(cmd)
        if ret != 0:
            self.logger.error("reqid:" + str(self.reqid) + " get config by asok failed, ret=" + str(ret) + ", res=" + res)
            return None, 'get daemon config failed'
        return json.loads(res), None

    def getcommonlist(self):
        """Return one page of the common-parameter list with live values.

        Response keys on success: 'commonlist' (the requested page, each
        entry annotated with 'current'), 'count' (total number of common
        parameters).  On failure the dict contains 'err'.
        """
        global commonlist
        respdict = {}

        cfg, err = self._get_mon_config()
        if err is not None:
            respdict['err'] = err
            return respdict

        # page selection (page numbers are 1-based)
        start_idx = int(self.pagesize) * (int(self.page) - 1)
        end_idx = start_idx + int(self.pagesize)
        page_items = commonlist[start_idx:end_idx]
        # NOTE: these dicts are shared with the module-level list, so the
        # 'current' annotation persists between requests (original behavior)
        for entry in page_items:
            entry['current'] = cfg.get(entry['name'], '0')
        respdict['commonlist'] = page_items
        respdict['count'] = len(commonlist)
        self.logger.info("reqid:" + str(self.reqid) + " success")
        return respdict

    def getlistbyregex(self):
        """Search the live config dump for parameter names containing self.val.

        Despite the name, plain substring matching is used, not a regular
        expression.  Response keys on success: 'list' (the requested page),
        'count' (total number of matches).  On failure the dict contains 'err'.
        """
        respdict = {}

        cfg, err = self._get_mon_config()
        if err is not None:
            respdict['err'] = err
            return respdict

        needle = str(self.val)
        resplist = []
        for name, current in cfg.items():
            if needle not in name:
                continue
            entry = {'id': len(resplist) + 1, 'name': name, 'current': current}
            meta = commondict.get(name)
            if meta is not None:
                # known common parameter: reuse its curated metadata
                entry['default'] = meta.get('default')
                entry['level'] = meta.get('level')
                entry['description'] = meta.get('description')
            else:
                entry['default'] = '-'
                entry['level'] = 1
                entry['description'] = '-'
            resplist.append(entry)

        # page selection (page numbers are 1-based)
        start_idx = int(self.pagesize) * (int(self.page) - 1)
        end_idx = start_idx + int(self.pagesize)
        respdict['list'] = resplist[start_idx:end_idx]
        respdict['count'] = len(resplist)

        self.logger.info("reqid:" + str(self.reqid) + " success")
        return respdict

    def gethosts(self):
        """Return {'hosts': [...]} with every host bucket reported by
        'ceph osd tree', or a dict containing 'err' on failure."""
        respdict = {}

        cmd = "/usr/bin/ceph osd tree | /usr/bin/grep -w host | /usr/bin/awk '{print $4}'"
        ret, hosts = commands.getstatusoutput(cmd)
        if ret != 0:
            self.logger.error("reqid:" + str(self.reqid) + " ceph osd tree failed, ret=" + str(ret) + ", res=" + hosts)
            respdict['err'] = 'ceph osd tree failed'
            return respdict
        respdict['hosts'] = hosts.split('\n')

        self.logger.info("reqid:" + str(self.reqid) + " success")
        return respdict

    def setval2file(self):
        """Persist self.item = self.val to /etc/ceph/ceph.conf cluster-wide.

        Steps: inject the value into the running daemons, rewrite a local
        copy of ceph.conf, scp the copy to every host, then mv it into place
        on each host (and locally).  Returns True on success, False otherwise.
        """
        # apply the value to the running daemons first
        if self.setval2temp() == False:
            self.logger.error("reqid:" + str(self.reqid) + " setval2temp failed")
            return False

        # work on a backup copy so the live file is only replaced at the end
        cmd = "/usr/bin/cp -p /etc/ceph/ceph.conf /etc/ceph/ceph.conf.bak"
        if os.system(cmd) != 0:
            self.logger.error("reqid:" + str(self.reqid) + " cp ceph.conf->ceph.conf.bak failed")
            return False

        # Rewrite the [global] section in the copy.  Both file handles are
        # closed deterministically; the previous code opened the write handle
        # while the read handle was still open and never closed it.
        config = ConfigParser.ConfigParser()
        with open("/etc/ceph/ceph.conf.bak", "r") as cfgfile:
            config.readfp(cfgfile)
        config.set("global", str(self.item), str(self.val))
        with open("/etc/ceph/ceph.conf.bak", "w") as outfile:
            config.write(outfile)

        hosts = self.gethosts()
        if 'err' in hosts:
            self.logger.error("reqid:" + str(self.reqid) + " get hosts failed")
            return False

        # copy the new file to each host without touching the live one yet
        for host in hosts['hosts']:
            cmd = "/usr/bin/scp /etc/ceph/ceph.conf.bak " + str(host) + ":/etc/ceph/ceph.conf.bak"
            if os.system(cmd) != 0:
                self.logger.error("reqid:" + str(self.reqid) + " cp ceph.conf.bak->remote hosts failed")
                return False

        # activate the new file on each remote host
        for host in hosts['hosts']:
            cmd = "/usr/bin/ssh " + str(host) + " /usr/bin/mv -f /etc/ceph/ceph.conf.bak /etc/ceph/ceph.conf"
            if os.system(cmd) != 0:
                self.logger.error("reqid:" + str(self.reqid) + " mv ceph.conf.bak on remote hosts failed")
                return False

        # finally activate it on this node as well
        cmd = "/usr/bin/mv -f /etc/ceph/ceph.conf.bak /etc/ceph/ceph.conf"
        if os.system(cmd) != 0:
            self.logger.error("reqid:" + str(self.reqid) + " mv ceph.conf.bak on localhost failed")
            return False

        self.logger.info("reqid:" + str(self.reqid) + " set " + str(self.item) + " 2 configfile success")
        return True

    def setval2temp(self):
        """Inject self.item = self.val into all running MONs and OSDs.

        The change is volatile (lost on daemon restart).  Returns True on
        success, False otherwise.

        SECURITY NOTE(review): self.item/self.val are interpolated into a
        shell command line; callers must ensure they are trusted input, or
        this should move to subprocess with an argument list.
        """
        # tell all MONs
        cmd = "/usr/bin/ceph tell mon.* injectargs '--" + str(self.item) + " " + str(self.val) + "'"
        if os.system(cmd) != 0:
            self.logger.error("reqid:" + str(self.reqid) + " tell MONs failed")
            return False

        # tell all OSDs
        cmd = "/usr/bin/ceph tell osd.* injectargs '--" + str(self.item) + " " + str(self.val) + "'"
        if os.system(cmd) != 0:
            self.logger.error("reqid:" + str(self.reqid) + " tell OSDs failed")
            return False

        self.logger.info("reqid:" + str(self.reqid) + " set " + str(self.item) + " success")
        return True

    def __del__(self):
        # nothing to release explicitly; the cluster handle is owned by the caller
        pass
