#!/usr/bin/env python
# encoding: utf-8
"""
===============================================================================

         FILE:  mysql_checker.py

        USAGE:  ./mysql_checker.py

  DESCRIPTION:  

      OPTIONS:  ---
 REQUIREMENTS:  ---
         BUGS:  ---
        NOTES:  ---
       AUTHOR:  Xu Xiang(x2x4@x2x4.net)
      COMPANY:  x2x4.net
      VERSION:  1.0
      CREATED:  2015-01-12
     REVISION:  ---
===============================================================================
"""

import sys
import os
import re
import getopt
import ConfigParser
import MySQLdb
import time
import json

# Module-wide switch: when True, debug() prints timestamped trace lines.
DEBUG = True
VERION = 0.01  # script version shown by usage(); name kept for compatibility


# Directory holding this script, and its file name without the .py suffix.
script_path = os.path.dirname(os.path.abspath(sys.argv[0]))
# BUG FIX: str.rstrip('.py') strips any trailing '.', 'p' or 'y' characters
# (e.g. 'happy.py' -> 'ha'); splitext/basename removes only the extension.
script_raw_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]

# Fallback settings used when the command line does not supply them.
default_options = {'mysql' : {'username':'monmysql','password':'monmysql','port':3306},
                   'conf_file' : os.path.join(script_path, script_raw_name + '.cfg'),}

# Maps each -o command-line value to the outputType method ('binding')
# that renders the result, plus the help text shown by usage().
output_binding = {
    'zabbix_agent': {
        "binding": "zaagt",
        "explain": "Zabbix agent use",
        "class": "outputType",
    },
    'zabbix_sender': {
        "binding": "zased",
        "explain": "Zabbix Sender Datatype",
        "class": "outputType",
    },
    'nagios': {
        "binding": "nagagt",
        "explain": "Nagios Datatype",
        "class": "outputType",
    },
}

def _mysql_type(binding, explain, argv, type_keys,
                type_sql="show global status"):
    # One -t table entry: 'binding' names the mysqlDataType method that
    # computes the metric, 'type_keys' lists the status keys it consumes.
    return {
        "binding": binding,
        "explain": explain,
        "class": "mysqlDataType",
        "argv": argv,
        "type_sql": type_sql,
        "type_owner": "global_status",
        "type_keys": type_keys,
    }

# Maps each -t command-line value to its collection recipe.
type_binding = {
    'qps_per': _mysql_type("qps", "QPS by per sec", "per",
                           "Queries:Questions:Uptime"),
    'qps_all': _mysql_type("qps", "QPS by all", "all",
                           "Queries:Questions:Uptime"),
    'tps_per': _mysql_type("tps", "TPS by per sec", "per",
                           "Com_commit:Com_rollback:Uptime"),
    'tps_all': _mysql_type("tps", "TPS by all", "all",
                           "Com_commit:Com_rollback:Uptime"),
    'key_buffer_read_hits_per': _mysql_type(
        "key_buffer_read_hits", "key Buffer Read Hits per sec", "per",
        "Key_reads:Key_read_requests"),
    'key_buffer_read_hits_all': _mysql_type(
        "key_buffer_read_hits", "key Buffer Read Hits all", "all",
        "Key_reads:Key_read_requests"),
    'key_buffer_write_hits_per': _mysql_type(
        "key_buffer_write_hits", "key Buffer Write Hits per sec", "per",
        "Key_writes:Key_write_requests"),
    'key_buffer_write_hits_all': _mysql_type(
        "key_buffer_write_hits", "key Buffer Write Hits all", "all",
        "Key_writes:Key_write_requests"),
    'innodb_buffer_hits_per': _mysql_type(
        "innodb_buffer_hits", "InnoDB Buffer per sec", "per",
        "Innodb_buffer_pool_read_requests:Innodb_buffer_pool_reads"),
    'innodb_buffer_hits_all': _mysql_type(
        "innodb_buffer_hits", "InnoDB Buffer all", "all",
        "Innodb_buffer_pool_read_requests:Innodb_buffer_pool_reads"),
    'innodb_buffer_write_per': _mysql_type(
        "innodb_buffer_pool_write", "InnoDB pool write per sec", "per",
        "Innodb_buffer_pool_write_requests"),
    'innodb_buffer_write_all': _mysql_type(
        "innodb_buffer_pool_write", "InnoDB pool write all", "all",
        "Innodb_buffer_pool_write_requests"),
    'query_cache_hits': _mysql_type(
        "query_cache_hits", "Query Cache Hits", "all",
        "Qcache_hits:Qcache_inserts"),
    'table_cache_usage': _mysql_type(
        "table_cache_usage", "Table Cache Usage", "all",
        "Open_tables:table_open_cache",
        type_sql="show global status;show VARIABLES"),
    'thread_cache_hits': _mysql_type(
        "thread_cache_hits", "Thread Cache Hits", "all",
        "Connections:Threads_created"),
}

# Recognized getopt option letters (a trailing ':' means the option takes
# a value).  'conflict' names a mutually exclusive option; 'binding' names
# the handler resolved later by get_opts().
argv_options = {
    'h': {"conflict": None, "binding": "usage"},
    'i': {"conflict": None, "binding": "install"},
    'H:': {"conflict": "-S", "binding": "connect_by_ip", "class": "connectMysql"},
    'S:': {"conflict": "-H", "binding": "connect_by_sock", "class": "connectMysql"},
    "u:": {"conflict": None, "binding": None},
    "p:": {"conflict": None, "binding": None},
    "t:": {"conflict": None, "binding": None},
    "o:": {"conflict": None, "binding": None},
    "c:": {"conflict": None, "binding": None},
}

# Pre-compiled patterns used to validate command-line values.
re_search = {
    # hostname or dotted IP with an optional ':port' suffix
    'host_port': re.compile(r'^(\d+(?:\.\d+){3}|[a-zA-Z0-9._\-]+)(?:\:(\d+))?$'),
    # path to a MySQL unix socket file
    'sock_file': re.compile(r'^([a-zA-Z0-9._\-\s/]+)'),
    # blank / whitespace-only string
    'empty': re.compile(r'^\s*$'),
    # X.Y.Z version string reported by the server
    'mysql_version': re.compile(r'^(\d+)\.(\d+)\.(\d+).*'),
}

# Options assembled by get_opts() and consumed by the rest of the script.
running_option = {
    'connect_type': {},
    'data_type': {},
    'output_type': {},
    'other_opts': {},
}

class connectMysql(object):
    """Open a MySQLdb connection from the options collected on the
    command line, either over TCP (-H) or through a unix socket (-S)."""

    def __init__(self, options):
        self.opts = options

    def connect(self):
        """Dispatch to the connect method matching the parsed -H/-S option."""
        conn_type = self.opts['connect_type']['type']
        conn = None
        if conn_type == 'H:':
            conn = self.connect_by_ip()
        if conn_type == 'S:':
            conn = self.connect_by_sock()
        return conn

    def connect_by_ip(self):
        """Connect over TCP; 'passwd' is passed only when a password was given."""
        conn_opts = self.opts['connect_type']
        debug('ip connect: ' + str(conn_opts['server']))
        kwargs = {'host': conn_opts['server'],
                  'user': conn_opts['username'],
                  'port': conn_opts['port']}
        if conn_opts['password']:
            kwargs['passwd'] = conn_opts['password']
        return MySQLdb.connect(**kwargs)

    def connect_by_sock(self):
        """Connect through a local unix socket file."""
        conn_opts = self.opts['connect_type']
        debug('sock connect: ' + str(conn_opts['file']))
        kwargs = {'unix_socket': conn_opts['file'],
                  'user': conn_opts['username']}
        if conn_opts['password']:
            kwargs['passwd'] = conn_opts['password']
        return MySQLdb.connect(**kwargs)

#

class storageSQLite(object):
    """Persist per-run status samples in a local SQLite database so the
    next invocation can compute per-second deltas.

    Row layout: |id|unixtime|connect_type|binding|type_owner|type_key|value|
    The table is named after self.opts['hostname'].
    """

    def __init__(self,opts):
        # opts: storage configuration dict built by searchStorageStore
        self.opts = opts

    def init_db(self,db_handle):
        """Create the per-host history table on first use; exits on failure."""
        import sqlite3
        try:
            cursor = db_handle.cursor()
            db_name = self.opts['hostname']
            # table names cannot be bound parameters, so concatenation stays
            sql = 'create table ' + db_name + ' (id integer primary key, unixtime INTEGER,connect_type text, binding text ,type_owner text, type_key text, value INTEGER)'
            cursor.execute(sql)
        except sqlite3.OperationalError as e:
            error('initial sqlite db error: ' + str(e),1)

    def testing(self,db_handle):
        """Return True when the per-host history table already exists."""
        import sqlite3
        have_record = False
        try:
            cursor = db_handle.cursor()
            db_name = self.opts['hostname']
            # table name compared as a bound value against sqlite_master
            sql = 'select count(*) from sqlite_master where type="table" and name=?'
            count = cursor.execute(sql, (db_name,)).fetchall()[0][0]
            # reclaim free pages to keep the db file small
            cursor.execute('Vacuum')
            if count > 0:
                have_record = True
        except sqlite3.OperationalError as e:
            error('testing sqlite db error: ' + str(e),1)
        return have_record

    def connect(self):
        """Open the SQLite database file, creating the table when missing.

        Returns the sqlite3 connection, or exits via error(...,1).
        """
        sqlite = None
        try:
            import sqlite3
            try:
                db_file = self.opts[self.opts['default_storage']]['file']
                sqlite = sqlite3.connect(db_file)
                if not self.testing(sqlite):
                    debug('Init sqlite db')
                    self.init_db(sqlite)
            except sqlite3.OperationalError as e:
                # BUG FIX: the old message appended a local 'sql' that was
                # never defined here, raising NameError inside the handler
                error('Connect storage ' + self.opts['default_storage'] + ' error: ' + str(e), 1)
        except ImportError as e:
            error('Import error: ' + str(e),1)
        return sqlite

    def loading(self,db_handle):
        """Fetch the most recent stored value for every key of the selected
        data type.

        Returns {type_owner: {type_key: value}}, or the int 0 when no
        previous sample exists (IndexError on the empty result set).
        """
        import sqlite3
        last_data = {}
        sql = ''
        try:
            cursor = db_handle.cursor()
            db_name = self.opts['hostname']
            for type_key in type_binding[running_option['data_type']]['type_keys'].split(':'):
                # table name concatenated; every compared value is bound
                sql = ('select type_owner,type_key,value from ' + db_name +
                       ' where type_key = ? and binding = ? and connect_type = ?'
                       ' order by value desc limit 0,1')
                params = (type_key,
                          running_option['data_type'],
                          running_option['connect_type']['raw'])
                debug(sql)
                # BUG FIX: execute once; the old code re-ran the query just
                # to debug-print the result
                sql_return = cursor.execute(sql, params).fetchall()[0]
                owner, key, value = sql_return
                debug(sql_return)
                if owner not in last_data:
                    last_data[owner] = {}
                last_data[owner][key] = value
        except sqlite3.OperationalError as e:
            error('Loading data from storage ' + self.opts['default_storage'] + ' error: ' + str(e) + '\n' + sql,1)
        except IndexError:
            # no history yet: sentinel 0 tells callers to fall back to 'all'
            last_data = 0
        debug(last_data)
        return last_data

    def saving(self,db_handle,data):
        """Insert the freshly collected values for every key of the selected
        data type; commits once after the loop."""
        import sqlite3
        sql = ''
        try:
            cursor = db_handle.cursor()
            db_name = self.opts['hostname']
            time_now = int(time.time())
            owner = type_binding[running_option['data_type']]['type_owner']
            for type_key in type_binding[running_option['data_type']]['type_keys'].split(':'):
                # values bound as parameters instead of string-built SQL
                sql = ('insert into ' + db_name +
                       '(unixtime,connect_type,binding,type_owner,type_key,value)'
                       ' values(?,?,?,?,?,?)')
                cursor.execute(sql, (str(time_now),
                                     running_option['connect_type']['raw'],
                                     running_option['data_type'],
                                     owner,
                                     type_key,
                                     data[owner][type_key]))
            # single commit for the whole batch (was one commit per row)
            db_handle.commit()
        except sqlite3.OperationalError as e:
            error('Saving data to storage ' + self.opts['default_storage'] + ' error: ' + str(e) + '\n' + sql,1)
        

class searchStorageStore(object):
    """Registry mapping each storage backend name to the class name and
    the method names implementing testing/loading/saving for it."""

    def __init__(self, opts):
        self.opts = opts
        fields = ('class', 'testing', 'loading', 'saving')
        self.method = {
            'sqlite': dict(zip(fields,
                               ('storageSQLite', 'testing', 'loading', 'saving'))),
            'mysql': dict(zip(fields,
                              ('storageMySQL', 'test_mysql', 'load_mysql', 'save_mysql'))),
            'mongodb': dict(zip(fields,
                                ('storageMongoDB', 'test_mongodb', 'load_mongodb', 'save_mongodb'))),
        }

    def list_storage(self):
        """Names of every known backend."""
        return self.method.keys()

    def search_store(self):
        """Copy the selected backend's bindings into opts and return opts."""
        storage = self.opts['default_storage']
        for key, value in self.method[storage].items():
            self.opts[storage][key] = value
        return self.opts
        
#
class mysqlDataType(object):
    """Compute the MySQL metric selected by -t.

    Every method has the same shape:
      last_data -- previous sample loaded from storage, or the int 0
                   when no history exists yet (forces 'all' mode)
      new_data  -- current sample: {'global_status': {...}, 'version': (...)}
      argv      -- 'all' averages over the server's whole uptime; 'per'
                   computes the delta against last_data
    Ratio methods return a '%.2f' string; counters return numbers.
    """

    def qps(self,last_data,new_data,argv='per'):
        """Queries per second.

        Before MySQL 5.1.31 only Questions exists; 5.1.31 added Queries,
        which also counts statements executed inside stored procedures,
        so prefer it when the server is new enough.
        """
        # BUG FIX: the old check only compared minor/patch ("x.1.31+"),
        # misclassifying e.g. 5.5.20; compare the full version tuple.
        version = tuple(int(part) for part in new_data['version'][:3])
        if version >= (5, 1, 31):
            query_name = 'Queries'
        else:
            query_name = 'Questions'
        debug('QPS: ' + query_name)
        try:
            if isinstance(last_data, int) or argv == 'all':
                # whole-uptime average: Questions(or Queries) / Uptime
                qps = int(new_data['global_status'][query_name])/int(new_data['global_status']['Uptime'])
            else:
                # interval rate: delta(counter) / delta(Uptime)
                qps = (
                        int(new_data['global_status'][query_name]) - int(last_data['global_status'][query_name])
                      ) / (
                        int(new_data['global_status']['Uptime']) - int(last_data['global_status']['Uptime'])
                      )
        except ZeroDivisionError:
            qps = 0
        return qps

    def tps(self,last_data,new_data,argv='per'):
        """Transactions per second: (Com_commit + Com_rollback) over time."""
        debug('tps: ' + argv)
        try:
            if isinstance(last_data, int) or argv == 'all':
                # whole-uptime average
                tps = (int(new_data['global_status']['Com_commit']) + int(new_data['global_status']['Com_rollback'])) / int(new_data['global_status']['Uptime'])
            else:
                # interval rate: delta(commits + rollbacks) / delta(Uptime)
                tps = (
                        (
                          int(new_data['global_status']['Com_commit']) - int(last_data['global_status']['Com_commit'])
                        ) + (
                          int(new_data['global_status']['Com_rollback']) - int(last_data['global_status']['Com_rollback'])
                        )
                      ) / (
                        int(new_data['global_status']['Uptime']) - int(last_data['global_status']['Uptime'])
                      )
        except ZeroDivisionError:
            tps = 0
        return tps

    def thread_cache_hits(self,last_data,new_data,argv='all'):
        """Thread cache hit ratio: (1 - Threads_created/Connections) * 100."""
        debug('thread_cache_hits: '+argv)
        try:
            # BUG FIX: the 'per' branch used to 'pass' and then raise
            # UnboundLocalError on return; both counters are cumulative,
            # so the cumulative ratio is used in either mode.
            thread_cache_hits = ( 1 - float(new_data['global_status']['Threads_created']) / float(new_data['global_status']['Connections']) ) * 100
        except ZeroDivisionError:
            thread_cache_hits = 0
        return '%.2f' % thread_cache_hits

    def table_cache_usage(self,last_data,new_data,argv='all'):
        """Table cache usage: Open_tables / table_open_cache * 100.

        table_cache was renamed table_open_cache in MySQL 5.1.3.  If
        Open_tables approaches table_open_cache while Opened_tables keeps
        growing, the cache is too small; keep the ratio <= 95%.
        """
        debug('table_cache_usage: '+argv)
        try:
            table_cache = float(new_data['global_status']['Open_tables']) / float(new_data['global_status']['table_open_cache']) * 100
        except ZeroDivisionError:
            # BUG FIX: a zero table_open_cache used to crash the script
            table_cache = 0
        return '%.2f' % table_cache

    def innodb_buffer_hits(self,last_data,new_data,argv='per'):
        """InnoDB buffer pool read hit ratio:
        (1 - Innodb_buffer_pool_reads / Innodb_buffer_pool_read_requests) * 100."""
        debug('innodb_buffer_hits '+argv)
        try:
            if isinstance(last_data, int) or argv == 'all':
                innodb_buffer_hits = ( 1 -  float(new_data['global_status']['Innodb_buffer_pool_reads']) / float(new_data['global_status']['Innodb_buffer_pool_read_requests'])) * 100
            else:
                # same ratio computed on the counter deltas
                innodb_buffer_hits = ( 1 -  (
                                                (
                                                    float(new_data['global_status']['Innodb_buffer_pool_reads']) - float(last_data['global_status']['Innodb_buffer_pool_reads'])
                                                ) / (
                                                    float(new_data['global_status']['Innodb_buffer_pool_read_requests']) - float(last_data['global_status']['Innodb_buffer_pool_read_requests'])
                                                )
                                            )
                                     ) * 100
        except ZeroDivisionError:
            innodb_buffer_hits = 0
        return '%.2f' % innodb_buffer_hits

    def innodb_buffer_pool_write(self,last_data,new_data,argv='per'):
        """InnoDB buffer pool write requests: total ('all') or interval delta."""
        debug('innodb_buffer_pool_write ' + argv)
        if isinstance(last_data, int) or argv == 'all':
            innodb_buffer_pool_write = int(new_data['global_status']['Innodb_buffer_pool_write_requests'])
        else:
            innodb_buffer_pool_write = int(new_data['global_status']['Innodb_buffer_pool_write_requests']) - int(last_data['global_status']['Innodb_buffer_pool_write_requests'])
        return innodb_buffer_pool_write

    def query_cache_hits(self,last_data,new_data,argv='all'):
        """Query cache hit ratio:
        (Qcache_hits / (Qcache_hits + Qcache_inserts)) * 100."""
        debug('query_cache_hits '+argv)
        try:
            # BUG FIX: the 'per' branch used to 'pass' and then raise
            # UnboundLocalError on return; the counters are cumulative, so
            # the cumulative ratio is used in either mode.
            query_cache_hits = ( float(new_data['global_status']['Qcache_hits']) / ( float(new_data['global_status']['Qcache_hits']) + float(new_data['global_status']['Qcache_inserts']) ) ) * 100
        except ZeroDivisionError:
            query_cache_hits = 0
        return '%.2f' % query_cache_hits

    def key_buffer_write_hits(self,last_data,new_data,argv='per'):
        """MyISAM key buffer write hit ratio:
        (1 - Key_writes / Key_write_requests) * 100."""
        debug('key_buffer_write_hits '+argv)
        try:
            if isinstance(last_data, int) or argv == 'all':
                key_buffer_write_hits = ( 1 - float(new_data['global_status']['Key_writes']) / float(new_data['global_status']['Key_write_requests']) ) * 100
            else:
                # same ratio computed on the counter deltas
                key_buffer_write_hits = ( 1 -
                                            (
                                                float(new_data['global_status']['Key_writes']) - float(last_data['global_status']['Key_writes'])
                                            ) / (
                                                float(new_data['global_status']['Key_write_requests']) -  float(last_data['global_status']['Key_write_requests'])
                                            )
                                        ) * 100
        except ZeroDivisionError:
            key_buffer_write_hits = 0
        return '%.2f' % key_buffer_write_hits

    def key_buffer_read_hits(self,last_data,new_data,argv='per'):
        """MyISAM key buffer read hit ratio:
        (1 - Key_reads / Key_read_requests) * 100."""
        debug('key_buffer_read_hits '+argv)
        try:
            if isinstance(last_data, int) or argv == 'all':
                key_buffer_read_hits = ( 1 - float(new_data['global_status']['Key_reads']) / float(new_data['global_status']['Key_read_requests'])) * 100
            else:
                # same ratio computed on the counter deltas
                key_buffer_read_hits = ( 1 -
                                            (
                                                float(new_data['global_status']['Key_reads']) - float(last_data['global_status']['Key_reads'])
                                            ) / (
                                                float(new_data['global_status']['Key_read_requests']) - float(last_data['global_status']['Key_read_requests'])
                                            )
                                        ) * 100
        except ZeroDivisionError:
            key_buffer_read_hits = 0
        return '%.2f' % key_buffer_read_hits

#
class outputType(object):
    """Emit the computed metric in the format selected by -o.

    Method names match the 'binding' values of output_binding.
    zased and nagagt are unimplemented stubs: they only log via debug().
    """
    #def __init__(self,opts):
    #    self.opts = opts


    def zaagt(self,data):
        #zabbix agent: print the bare value to stdout for the agent to read
        debug('zabbix agent')
        print data
    
    def zased(self,data):
        #zabbix sender: stub, not implemented yet
        debug('zabbix sender')
        
    def nagagt(self,data):
        #nagios agent: stub, not implemented yet
        debug('nagios agent')


def debug(words):
    """Print a timestamped trace line when the module-level DEBUG flag is on."""
    if not DEBUG:
        return
    stamp = time.strftime('%Y-%m-%d %H:%M:%S')
    print('[' + stamp + '] ' + '[DEBUG] ' + str(words))

def error(words,code=0):
    """Print a timestamped error line; exit the process when code > 0."""
    stamp = time.strftime('%Y-%m-%d %H:%M:%S')
    print('[' + stamp + '] ' + '[ERROR] ' + str(words))
    if code > 0:
        sys.exit(code)
        
#
class myError(Exception):
    """Script-local exception type; str() yields the stored message."""

    def __init__(self, value):
        # keep the raw message available as .value
        self.value = value

    def __str__(self):
        return self.value


def install():
    """Interactive setup: meant to build the configuration file step by
    step (currently a stub that just logs and exits)."""
    debug('install')
    sys.exit()
    
def get_opts():
    """Parse sys.argv into the module-level running_option dict.

    Validates -H/-S (mutually exclusive), -u/-p credentials, -t data
    type, -o output type, -c config file and -i install mode, fills in
    defaults from default_options, and exits via error(...,1) on any
    invalid or conflicting combination.
    """
    # build the getopt option string from the allowed option table
    argv_list = ''.join(argv_options.keys())
    try:
        # parse the raw command line
        opts, args = getopt.getopt(sys.argv[1:], argv_list)
    except getopt.GetoptError as e:
        error(str(e) + '\n' + 'Please run ' + sys.argv[0] + ' -h to get help', 1)
    count = 0
    opt_key_array = []
    # opts is a list of (option, value) pairs; collect just the option
    # names so conflicts can be detected by membership, e.g.
    #[('-H', 'localhost'), ('-t', 'qps_per'), ('-o', 'zabbix_agent'), ('-u', 'root'), ('-p', 'root')]
    #['-H', '-t', '-o', '-u', '-p']
    while count < len(opts):
        opt_key_array.append(opts[count][0])
        count = count + 1
    try:
        # walk every parsed option
        for opt,val in opts:
            if opt == '-h':
                usage()
            if opt == '-H':
                #connect by servername or ip
                #let's check options
                # -H and -S are mutually exclusive, so reject the other one
                running_option['connect_type']['raw'] = val
                if argv_options['H:']['conflict'] in opt_key_array:
                    raise ValueError('Option conflict problem, -H conflict with ' + argv_options['H:']['conflict'])
                #192.168.1.2:3306 or 192.168.1.2
                # split the value into hostname/ip and optional port
                is_match = re_search['host_port'].search(val)
                if is_match:
                    #print is_match.groups()
                    if not is_match.groups()[1]:
                        port = default_options['mysql']['port']
                    else:
                        port = is_match.groups()[1]
                    running_option['connect_type']['type'] = 'H:'
                    running_option['connect_type']['server'] = is_match.groups()[0]
                    running_option['connect_type']['port'] = int(port)
                    #python mysql problem, if host='localhost', always use sock to connect
                    #or you can change you my.cnf setting, add
                    #[client]
                    #protocol=tcp
                    if running_option['connect_type']['server'] == 'localhost':
                        running_option['connect_type']['server'] = '127.0.0.1'
                    #print running_option
                else:
                    raise ValueError('-H Value problem')
            if opt == '-S':
                #connect by socket file
                #let's check options
                running_option['connect_type']['raw'] = val
                if argv_options['S:']['conflict'] in opt_key_array:
                    raise ValueError('[ERROR] Option conflict problem, -S conflict with ' + argv_options['S:']['conflict'])
                is_match = re_search['sock_file'].search(val)
                if is_match:
                    if os.path.exists(val):
                        running_option['connect_type']['type'] = 'S:'
                        running_option['connect_type']['file'] = val
                        #print running_option
                    else:
                        raise ValueError('Sock file ' + val + ' not exist')
                else:
                    # NOTE(review): this is the -S branch; the message
                    # probably should read '-S Value problem' — confirm
                    raise ValueError('-H Value problem')
            if opt == '-u':
                running_option['connect_type']['username'] = val
            if opt == '-p':
                running_option['connect_type']['password'] = val
            if opt == '-i':
                install()
            if opt == '-c':
                running_option['other_opts']['conf_file'] = val
            if opt == '-t':
                if val in type_binding.keys():
                    running_option['data_type'] = val
                else:
                    raise ValueError('-t value ' + val + ' not in ' + str(sorted(type_binding.keys())))
            if opt == '-o':
                if val in output_binding.keys():
                    running_option['output_type'] = val
                else:
                    raise ValueError('-o value ' + val + ' not in ' + str(sorted(output_binding.keys())))
        ## Check type
        if not running_option['data_type']:
            raise ValueError('MySQL DataType not defined')
        if not running_option['output_type']:
            raise ValueError('OutputType not defined')
        if running_option['connect_type'].has_key('username') and not re_search['empty'].search(running_option['connect_type']['username']):
            #Yes command line defined mysql user
            #Let's check user's password
            if not running_option['connect_type'].has_key('password'):
                # NOTE(review): this overwrites a user supplied via -u with
                # the defaults whenever -p is missing, and it makes the
                # identical check just below unreachable — looks unintended
                #password val is empty
                debug('No User and Passwd defined, use default')
                running_option['connect_type']['username'] = default_options['mysql']['username']
                running_option['connect_type']['password'] = default_options['mysql']['password']
            if not running_option['connect_type'].has_key('password'):
                #no password given
                running_option['connect_type']['password'] = False
            else:
                #have password
                if re_search['empty'].search(running_option['connect_type']['password']):
                    #password value is empty
                    running_option['connect_type']['password'] = False
        else:
            #no user defined, use default value instead
            debug('No User and Passwd defined, use default')
            running_option['connect_type']['username'] = default_options['mysql']['username']
            running_option['connect_type']['password'] = default_options['mysql']['password']
        if not running_option['other_opts'].has_key('conf_file'):
            running_option['other_opts']['conf_file'] = default_options['conf_file']
        #check connect type (is Class)
        #print running_option
        # resolve the configured class/method names so a bad binding-table
        # entry is caught here as NameError/AttributeError
        getattr(eval(argv_options[running_option['connect_type']['type']]['class']),argv_options[running_option['connect_type']['type']]['binding'])
        #check data type
        #print type_binding[running_option['data_type']]['binding']
        getattr(eval(type_binding[running_option['data_type']]['class']),type_binding[running_option['data_type']]['binding'])
        #check output type
        getattr(eval(output_binding[running_option['output_type']]['class']),output_binding[running_option['output_type']]['binding'])
    except ValueError as e:
        error(str(e) + '\n' + 'Please run ' + sys.argv[0] + ' -h to get help', 1)
    except NameError as e:
        error(str(e) + ', Please check command line', 1)
    except AttributeError as e:
        error(str(e) + ', Please check command line', 1)
        
    
    

def usage(code=0):
    print "Usage: " + sys.argv[0] + " (Connect Type) (Mysql Status Type) (Output Type) (Other Options)\n"
    print "Connect Type:"
    print "   -H  Connect by hostname/ip[:port] [-u username -p password] | -H 192.168.1.200:3306 | -H db1:3307"
    print "   -S  Connect by Sock [-u username -p password] | -S /var/mysql/mysql.sock -u xxx -p yyy"
    print "   If no username & password given, script will use monmysql/monmysql\n"
    print "Mysql Status Type:"
    print "   -t  " + str(sorted(type_binding.keys()))
    for tb_key in sorted(type_binding.keys()):
        print "       " + tb_key + " : " + type_binding[tb_key]['explain']
    print ""
    print "Output Type:"
    print "   -o  " + str(sorted(output_binding.keys()))
    for ob_key in sorted(output_binding.keys()):
        print "       " + ob_key + " : " + output_binding[ob_key]['explain']
    print ""
    print "Other Options:"
    print "   -c  read configure setting from given file"
    print "   -i  setup script step by step"
    print ""
    print "Version: " + str(VERION)
    sys.exit(code)


def prepare_process():
    """Return a live connection object for the target MySQL server.

    Connection parameters come from the module-level running_option hash;
    on failure the script terminates through error() with exit code 1.
    """
    debug('[prepare_process]')
    try:
        mysql_obj = connectMysql(running_option)
        link = mysql_obj.connect()
    except MySQLdb.Error as e:
        error('MySQL error code:{' + str(e[0]) + '} msg:{' +  str(e[1]) + '}, Please check command line', 1)
    return link
    #Make link


def get_last_data(data_storage):
    """Load the previously saved status snapshot from the configured
    storage backend (kv / mysql / sqlite) and return it as a hash."""
    debug('[get_last_data]')
    backend_cfg = data_storage[data_storage['default_storage']]
    # instantiate the storage class dynamically from its configured name
    backend = eval(backend_cfg['class'])(data_storage)
    link = getattr(backend, 'connect')()
    # resolve the configured loader method dynamically and run it
    loader = getattr(backend, backend_cfg['loading'])
    return loader(link)

def get_new_data(conn,data_storage):
    """Collect the current status values from MySQL via *conn*, persist a
    copy into the configured storage backend, and return them as a hash.

    conn         -- open MySQL connection for the monitored server;
                    always closed before returning (see finally below).
    data_storage -- storage-backend configuration hash from loading_config().

    Lookup parameters come from type_binding[running_option['data_type']]:
      'type_owner' : key under which the values are grouped in new_data
      'type_sql'   : SQL to run; ';' separates multiple statements
      'type_keys'  : ':'-separated status variable names to keep
    """
    debug('[get_new_data]')
    new_data = {}
    try:
        cursor = conn.cursor()
        owner = type_binding[running_option['data_type']]['type_owner']
        new_data[owner] = {}
        # a trailing ';' would otherwise yield an empty statement after split
        sqls = type_binding[running_option['data_type']]['type_sql'].rstrip(';').split(';')
        sql_data = {}
        for sql in sqls:
            cursor.execute(sql)
            for k,v in cursor.fetchall():
                sql_data[k] = v
        for type_key in type_binding[running_option['data_type']]['type_keys'].split(':'):
            if type_key in sql_data:
                new_data[owner][type_key] = sql_data[type_key]
            else:
                raise myError('MySQL status have not ' + type_key + ' ,Please check')
        cursor.execute('select version()')
        new_data['version'] = []
        mysql_version = cursor.fetchall()[0][0]
        is_match = re_search['mysql_version'].search(mysql_version)
        if is_match:
            new_data['version'] = is_match.groups()
        debug(new_data)
        # Persist a copy via the dynamically instantiated storage backend.
        # BUG FIX: use a separate name for the storage connection -- the
        # original rebound the *conn* parameter here, so the finally clause
        # closed the storage link and leaked the MySQL connection.
        sto = eval(data_storage[data_storage['default_storage']]['class'])(data_storage)
        sto_link = getattr(sto,'connect')()
        sto_saving = getattr(sto,data_storage[data_storage['default_storage']]['saving'])
        sto_saving(sto_link,new_data)
        sto_link.close()
    except MySQLdb.Error as e:
        error('MySQL error code:{' + str(e[0]) + '} msg:{' +  str(e[1]) + '}', 1)
    except myError as e:
        error(str(e),1)
    finally:
        # always release the MySQL connection, even when error() is reached
        conn.close()
    return new_data
    
def processing_data(last_data,new_data):
    """Combine the previous and current status snapshots into the output hash.

    The class, method and extra argument used for the calculation are all
    looked up in type_binding under the selected data_type switch.
    """
    debug('[processing_data]')
    debug('MySQL Datatype is ' + running_option['data_type'])
    spec = type_binding[running_option['data_type']]
    # dynamic instantiation of the calculator class, then dynamic dispatch
    calculator = eval(spec['class'])()
    calc = getattr(calculator, spec['binding'])
    return calc(last_data, new_data, spec['argv'])

    
def output_data(data):
    """Emit *data* in the format selected by the -o output switch."""
    debug('[output_data]: ' + str(data))
    spec = output_binding[running_option['output_type']]
    debug(spec['class'])
    # dynamic instantiation of the writer class, then dynamic dispatch
    writer = eval(spec['class'])()
    getattr(writer, spec['binding'])(data)

def loading_config():
    """Read the script configuration file and return the storage-backend hash."""
    debug("[loading_config]")
    try:
        data_storage = {}
        conf_file = running_option['other_opts']['conf_file']
        if not os.path.exists(conf_file):
            raise myError(conf_file + ' not existed')
        config = ConfigParser.ConfigParser()
        config.read(conf_file)
        data_storage['default_storage'] = config.get('global','storage')
        data_storage['hostname'] = config.get('global','hostname')
        # let searchStorageStore work out which backends are usable
        sSS = searchStorageStore(data_storage)
        allow_storage = sSS.list_storage()
        if data_storage['default_storage'] not in allow_storage:
            raise myError(data_storage['default_storage'] + ' error, only allow ' + str(allow_storage))
        # copy every option of the selected backend's section into the hash
        section = data_storage['default_storage']
        data_storage[section] = {}
        for option in config.options(section):
            data_storage[section][option] = config.get(section, option)
        # merge in the backend's own store configuration
        data_storage = sSS.search_store()
    except ConfigParser.NoSectionError as e:
        error(running_option['other_opts']['conf_file'] + ' value error, please check, ' + str(e),1)
    except myError as e:
        error(str(e),1)
    debug('loading config: ' + str(data_storage))
    return data_storage

        


def main():
    """Script entry point: gather options, fetch MySQL status, emit the result."""
    # parse the command line switches, validate them and fill in defaults
    get_opts()
    debug('show running options: ' + str(running_option))
    # connection object for the monitored MySQL instance
    mysql_conn = prepare_process()
    # read the configuration file and return the storage hash
    data_storage = loading_config()
    # previous snapshot, fetched from the configured storage backend
    last_data = get_last_data(data_storage)
    # current snapshot from MySQL; a copy is persisted to the backend
    new_data = get_new_data(mysql_conn, data_storage)
    # shape the data according to the selected data-type switch
    out_data = processing_data(last_data, new_data)
    # emit in whatever format the output switch selected
    output_data(out_data)


if __name__ == '__main__':
    main()

