#!/usr/bin/env python

#
#    Copyright (c) 2009 Corey Goldberg (corey@goldb.org)
#    License: GNU GPLv3
#
#    This file is part of PerfMetrics.
#    
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.  See the GNU General Public License 
#    for more details.
#


import ConfigParser
import logging
import os
import pythoncom
import threading
import time
import re
import lib.rrd as rrd
import lib.wmi as wmi
from subprocess import Popen, PIPE



def main():
    logger.info('perfmetrics started')       
    
    opts, host_infos = configure()
    
    last_run_stats = {}
    
    threads = []
    for host_info in host_infos:
        mp = MetricsProbe(host_info['host'], host_info['username'], host_info['password'], host_info['collectors'], opts, last_run_stats)
        mp.start()
        threads.append(mp)
    for t in threads:
        t.join()  # wait for threads to finish
        
    for k,v in sorted(last_run_stats.items()):
        print '%s, %s, %d' % (k[0], k[1], v)
    
    logger.info('perfmetrics finished')       
    

def configure():
    """Parse config.ini and return (global options dict, list of host dicts).

    The [global] section supplies graphing/logging options; every other
    section describes one host to probe.
    """
    config = ConfigParser.ConfigParser()
    config.read('config.ini')

    opts = {
        'graph_width': config.get('global', 'graph_width'),
        'graph_height': config.get('global', 'graph_height'),
        'log_results': config.get('global', 'log_results'),
        'rrd_results': config.get('global', 'rrd_results'),
        'graph_mins': [int(mins) for mins in
                       config.get('global', 'graph_mins').split(',')],
    }

    host_infos = []
    for section in config.sections():
        if section == 'global':
            continue
        host_infos.append({
            'host': config.get(section, 'host'),
            'username': config.get(section, 'username'),
            'password': config.get(section, 'password'),
            'collectors': config.get(section, 'collectors').split(','),
        })
    return (opts, host_infos)
    


class MetricsProbe(threading.Thread):
    def __init__(self, host, username, password, collectors, opts, last_run_stats):
        threading.Thread.__init__(self)
        self.host = host
        self.username = username
        self.password = password
        self.collectors = collectors
        self.last_run_stats = last_run_stats
        self.graph_mins = opts['graph_mins']
        self.graph_width = opts['graph_width']
        self.graph_height = opts['graph_height']
        self.log_results = opts['log_results']
        self.rrd_results = opts['rrd_results']
                
        
    def run(self):
        pythoncom.CoInitialize()  # need this for multithreading COM/WMI
        
        try:
            sf = StatFetcher(self.host, self.username, self.password)
            self.collect_and_update(sf)
        except Exception, e:
            print 'error with wmi connection - %s - %s' % (self.host, str(e))
            logger.error('error with wmi connection - %s - %s' % (self.host, str(e).replace('\n', ' ')))
            
        if self.rrd_results == 'on':
            self.graph()


    def collect_and_update(self, sf):
        collector_map = {}
        collector_map['cpu_util'] = sf.get_cpu_util
        collector_map['cpu_util_maxcore'] = sf.get_cpu_util_maxcore
        collector_map['cpu_queue_length'] = sf.get_cpu_queue_length
        collector_map['cpu_context_switches'] = sf.get_cpu_context_switches
        collector_map['net_bits_total'] = sf.get_net_bits_total
        collector_map['net_bits_in'] = sf.get_net_bits_in
        collector_map['net_bits_out'] = sf.get_net_bits_out
        collector_map['mem_available_bytes'] = sf.get_mem_available_bytes
        collector_map['mem_cache_bytes'] = sf.get_mem_cache_bytes
        collector_map['mem_committed_bytes'] = sf.get_mem_committed_bytes
        collector_map['mem_pages'] = sf.get_mem_pages
        collector_map['mem_page_faults'] = sf.get_mem_page_faults
        collector_map['disk_queue_length_avg'] = sf.get_disk_queue_length_avg
        collector_map['disk_queue_length_current'] = sf.get_disk_queue_length_current
        collector_map['disk_bytes_transferred'] = sf.get_disk_bytes_transferred
        collector_map['uptime'] = sf.get_uptime
        
        for collector in self.collectors:
            try:
                stat = collector_map[collector]()
                #print '%s,%d,%s,%d' % (self.host, int(time.time()), collector, stat)
                logger.debug('%s|%s|%d' % (self.host, collector, stat))            
            except Exception, e:
                try:  # retry once on a collection failure
                    stat = collector_map[collector]()
                    # print '%s,%d,%s,%d' % (self.host, int(time.time()), collector, stat)
                    logger.debug('%s|%s|%d' % (self.host, collector, stat))           
                except Exception, e:
                    stat = None
                    print 'error collecting %s stats - %s - %s' % (collector, self.host, str(e))
                    logger.error('error collecting %s stats - %s - %s' % (collector, self.host, str(e).replace('\n', ' ')))
            if stat is not None:
                self.last_run_stats[(self.host, collector)] = stat
                if self.rrd_results == 'on':
                    try:  # update the rrd database
                        rrd_db = rrd.RRD('%s_%s.rrd' % (self.host, collector))
                        rrd_db.rrd_exe = 'lib\\rrdtool'
                        rrd_db.rrd_directory = 'db\\'
                        rrd_db.update(stat) 
                    except rrd.RRDException, e:
                        print e
                        logger.error(e)
                if self.log_results == 'on':
                    try:  # log results to csv
                        log_directory = 'csv\\'
                        if not os.path.exists(log_directory):
                            os.makedirs(log_directory)
                        log_name = '%s%s_%s.csv' % (log_directory, self.host, collector)
                        with open(log_name, 'a') as f:
                            f.write('%s,%i\n' % (time.asctime(), stat)) 
                    except Exception, e:
                        print e
                        logger.error(e)
            else:
                self.last_run_stats[(self.host, collector)] = 'n/a'
                

    def graph(self):
        if not os.path.exists('graphs'):
            os.makedirs('graphs')
        
        for collector in self.collectors:
            for mins in self.graph_mins:
                try:
                    rrd_db = rrd.RRD('%s_%s.rrd' % (self.host, collector))
                    rrd_db.rrd_exe = 'lib\\rrdtool'
                    rrd_db.rrd_directory = 'db\\'
                    rrd_db.graph_directory = 'graphs\\'
                    rrd_db.graph_width = self.graph_width
                    rrd_db.graph_height = self.graph_height            
                           
                    if collector == 'cpu_util':
                        rrd_db.vertical_label = 'CPU Utilization'
                        rrd_db.graph_color ='#FF6666'
                        rrd_db.graph(mins, upper_limit=100)
                        
                    if collector == 'cpu_util_maxcore':
                        rrd_db.vertical_label = 'CPU Util (max core)'
                        rrd_db.graph_color ='#FF3333'
                        rrd_db.graph(mins, upper_limit=100)
                        
                    if collector == 'cpu_queue_length':
                        rrd_db.vertical_label = 'CPU Run Queue Length'
                        rrd_db.graph_color ='#CC6666'
                        rrd_db.graph(mins)
                        
                    if collector == 'cpu_context_switches':
                        rrd_db.vertical_label = 'Context Switches per sec'
                        rrd_db.graph_color ='#FF9999'
                        rrd_db.graph(mins)
                            
                    if collector == 'net_bits_total':
                        rrd_db.vertical_label = 'Total Network I/O (bits/sec)'
                        rrd_db.graph_color ='#66FF66'
                        rrd_db.graph(mins)
                        
                    if collector == 'net_bits_in':
                        rrd_db.vertical_label = 'Inbound Network I/O (bits/sec)'
                        rrd_db.graph_color ='#99FF99'
                        rrd_db.graph(mins)
                        
                    if collector == 'net_bits_out':
                        rrd_db.vertical_label = 'Outbound Network I/O (bits/sec)'
                        rrd_db.graph_color ='#33FF33'
                        rrd_db.graph(mins)
                
                    if collector == 'mem_available_bytes':
                        rrd_db.vertical_label = 'Memory Available Bytes'
                        rrd_db.graph_color ='#9999CC'
                        rrd_db.graph(mins)
                        
                    if collector == 'mem_cache_bytes':
                        rrd_db.vertical_label = 'Memory Cache Bytes'
                        rrd_db.graph_color ='#6699CC'
                        rrd_db.graph(mins)
                        
                    if collector == 'mem_committed_bytes':
                        rrd_db.vertical_label = 'Memory Committed Bytes'
                        rrd_db.graph_color ='#CCCCFF'
                        rrd_db.graph(mins)
                        
                    if collector == 'mem_pages':
                        rrd_db.vertical_label = 'Memory Pages per sec'
                        rrd_db.graph_color ='#99CCFF'
                        rrd_db.graph(mins)
                        
                    if collector == 'mem_page_faults':
                        rrd_db.vertical_label = 'Memory Page Faults per sec'
                        rrd_db.graph_color ='#9999FF'
                        rrd_db.graph(mins)
                        
                    if collector == 'disk_queue_length_avg':
                        rrd_db.vertical_label = 'Average Disk Queue Length'
                        rrd_db.graph_color ='#CC99CC'
                        rrd_db.graph(mins)
                        
                    if collector == 'disk_queue_length_current':
                        rrd_db.vertical_label = 'Current Disk Queue Length'
                        rrd_db.graph_color ='#FFCCFF'
                        rrd_db.graph(mins)
                        
                    if collector == 'disk_bytes_transferred':
                        rrd_db.vertical_label = 'Disk Bytes Transferred per sec'
                        rrd_db.graph_color ='#FF99FF'
                        rrd_db.graph(mins)
                        
                    if collector == 'uptime':
                        rrd_db.vertical_label = 'Uptime (hours)'
                        rrd_db.graph_color ='#FF99FF'
                        rrd_db.graph(mins)
                
                except rrd.RRDException, e:
                    print e
                    logger.error(e)
                    
                    
                    
class StatFetcher(object):
    """Fetches performance counters from a (possibly remote) Windows host
    over WMI.  Each get_* method returns a single integer stat."""

    def __init__(self, computer, user, password):
        # raises on connection/authentication failure; caller handles it
        self.c = wmi.WMI(computer=computer, user=user, password=password)

    def _cpu_loads(self):
        """Return per-processor load percentages with None readings coerced to 0."""
        return [cpu.LoadPercentage or 0 for cpu in self.c.Win32_Processor()]

    def _sum_int(self, records, attr):
        """Sum an integer-valued WMI property across all records."""
        return sum([int(getattr(record, attr)) for record in records])

    def get_cpu_util(self):
        loads = self._cpu_loads()
        return int(sum(loads) / len(loads))  # avg all cores/processors

    def get_cpu_util_maxcore(self):
        return int(max(self._cpu_loads()))  # max of all cores/processors

    def get_cpu_queue_length(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfOS_System(), 'ProcessorQueueLength')

    def get_cpu_context_switches(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfOS_System(), 'ContextSwitchesPerSec')

    def get_net_bits_total(self):
        total_bytes = self._sum_int(self.c.Win32_PerfRawData_Tcpip_NetworkInterface(), 'BytesTotalPerSec')
        return total_bytes * 8  # bytes -> bits

    def get_net_bits_in(self):
        recv_bytes = self._sum_int(self.c.Win32_PerfRawData_Tcpip_NetworkInterface(), 'BytesReceivedPerSec')
        return recv_bytes * 8  # bytes -> bits

    def get_net_bits_out(self):
        sent_bytes = self._sum_int(self.c.Win32_PerfRawData_Tcpip_NetworkInterface(), 'BytesSentPerSec')
        return sent_bytes * 8  # bytes -> bits

    def get_mem_available_bytes(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfOS_Memory(), 'AvailableBytes')

    def get_mem_cache_bytes(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfOS_Memory(), 'CacheBytes')

    def get_mem_committed_bytes(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfOS_Memory(), 'CommittedBytes')

    def get_mem_pages(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfOS_Memory(), 'PagesPerSec')

    def get_mem_page_faults(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfOS_Memory(), 'PageFaultsPerSec')

    def get_disk_queue_length_avg(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfDisk_PhysicalDisk(), 'AvgDiskQueueLength')

    def get_disk_queue_length_current(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfDisk_PhysicalDisk(), 'CurrentDiskQueueLength')

    def get_disk_bytes_transferred(self):
        return self._sum_int(self.c.Win32_PerfRawData_PerfDisk_PhysicalDisk(), 'DiskBytesPerSec')

    def get_uptime(self):
        """Return system uptime in whole hours (Py2 integer division)."""
        secs_up = int([uptime.SystemUpTime for uptime in self.c.Win32_PerfFormattedData_PerfOS_System()][0])
        return secs_up / 3600
        
        

               
if __name__ == '__main__':
    # configure the root logger to append to ./perf.log at INFO level;
    # the module-level functions and classes above log through this `logger`
    logging.basicConfig(
        filename='./perf.log',
        format='%(asctime)s - %(levelname)s - %(message)s',
        level=logging.INFO,
    )
    logger = logging.getLogger()

    main()
