'''
Created on Jul 21, 2011

@author: Krystian Brachmanski
'''

import logging
import re
import blacklistHandler
import basicStatistics
import os

class StatParser:
    """
    Base class for parsing the statistics files.

    Subclasses implement a concrete file format by overriding the
    callback methods detectHeader, getMatchedDataLine and
    processDataLineRegExp, and drive the parsing through parseFile.

    @author: Krystian Brachmanski
    @requires: logging
    """

    # Logger shared by all instances of this parser.
    log = logging.getLogger('yapcg.perfparser.StatParser')

    def __init__(self):
        # First header line seen in the input file; subclasses reuse it
        # when creating their own output files.
        self.header = ""

    def processDataLineRegExp(self, matchedDataLine):
        """
        Dummy callback method implementation for processing a data line - shall be overloaded in subclasses.
        @param matchedDataLine: the result of the getMatchedDataLine method
        @return: result of the operation (here False)
        """
        return False

    def detectHeader(self, line):
        """
        Dummy callback method implementation for detecting the header - shall be overloaded in subclasses.
        @param line: the line from the input file
        @return: boolean. True in case the line is a header, false otherwise
        """
        return False

    def getMatchedDataLine(self, line):
        """
        Dummy callback method implementation for parsing the data line - shall be overloaded in subclasses.
        @param line: the line from the input file
        @return: Parsed line object or None if the line is not a data line
        """
        return None

    def parseFile(self, filename, outFile=None):
        """
        Parse a file and invoke the corresponding callback methods
        (@see: getMatchedDataLine, processDataLineRegExp, detectHeader).
        @attention: if outFile is provided, an output file is created based
        on the input file but with filtered content
        @param filename: the name of the file to be parsed
        @param outFile: optional name of the output file (header + data
        lines from the input file, without comments, blank lines or
        repeated header lines). When None, no output file is written.
        """
        # The previous implementation unconditionally opened outFile and
        # crashed with TypeError when the documented default (None) was
        # used; it also leaked the handle if a callback raised.
        fh = open(outFile, 'w') if outFile is not None else None
        try:
            with open(filename) as f:
                for line in f:
                    matchedDataLine = self.getMatchedDataLine(line)
                    if matchedDataLine is not None:
                        self.log.debug("Matched data line : " + line.strip())
                        self.processDataLineRegExp(matchedDataLine)
                        if fh is not None:
                            fh.write(line)
                    elif self.detectHeader(line):
                        # Only the first header encountered is kept;
                        # repeated header lines are silently dropped.
                        if self.header == "":
                            self.log.debug("Found header in file - initializing header for output files")
                            self.header = line
                            if fh is not None:
                                fh.write("# " + line)
                    else:
                        self.log.debug("Ignoring line : " + line.rstrip())
        finally:
            if fh is not None:
                fh.close()
                
class PidStatParser(StatParser):
    '''
    pidstat output file parser. Responsible for producing per process
    (pid, command) stats in pidstat format, each in a separate file.

    @author: Krystian Brachmanski
    @requires: logging, re, blacklistHandler, basicStatistics
    '''

    log = logging.getLogger('yapcg.perfparser.PidStatParser')

    # Regular expression for the header line of the pidstat output file
    # (sysstat versions that optionally report StkSize/StkRef columns).
    header_pattern = r"\# +(Time) +(PID) +(\%usr) +\%(system) +\%(guest) +(\%CPU) +(CPU) +(minflt\/s) +(majflt\/s) +(VSZ) +(RSS) +(\%MEM)( +(StkSize) +(StkRef))* +(kB_rd\/s) +(kB_wr\/s) +(kB_ccwr\/s) +(Command)"
    # Header layout used by older sysstat releases (9.0.4).
    header_pattern_9_0_4 = r"\# +Time +PID +\%usr +\%system +\%guest +\%CPU +CPU +minflt\/s +majflt\/s +VSZ +RSS +\%MEM +kB_rd\/s +kB_wr\/s +kB_ccwr\/s +Command"

    # Regular expression for a data line in the pidstat output file.
    # Group numbering feeds the 'columns' mapping below.
    line_pattern = r" *([0-9]+) +([0-9]+) +([0-9\.]+) +([0-9\.]+) +([0-9\.]+) +([0-9\.]+) +([0-9]+) +([0-9\.]+) +([0-9\.]+) +([0-9]+) +([0-9]+) +([0-9\.]+)( +([0-9]+) +([0-9]+))* +([\-0-9\.]+) +([\-0-9\.]+) +([\-0-9\.]+) +(.+)"

    # Column names that are of interest to the parser.  Note the keys
    # deliberately contain a literal backslash (r"kB_rd\/s") to stay
    # identical to the original key strings.
    keys = ["pid", "process_name", "cpu", "memory", r"kB_rd\/s", r"kB_wr\/s"]
    # Mapping between column name and the regex group index holding that
    # value in line_pattern.
    columns = { keys[0]: 2,
                keys[1]: 19,
                keys[2]: 6,
                keys[3]: 12,
                keys[4]: 16,
                keys[5]: 17
              }

    def __init__(self, processNumber, processBlackList, dirHandler):
        '''
        Constructor
        @param processNumber: how many top processes to keep per metric
        @param processBlackList: the list of blacklisted processes
        @param dirHandler: the instance of directoryHandler to provide appropriate directories
        '''
        StatParser.__init__(self)
        # Compiled pattern for matching the data lines.
        self.prog = re.compile(self.line_pattern)
        # Compiled pattern for matching the header lines.
        self.prog2 = re.compile(self.header_pattern)
        # Compiled header pattern for older sysstat (9.0.4);
        # NOTE(review): currently not consulted by detectHeader.
        self.prog_9_0_4 = re.compile(self.header_pattern_9_0_4)
        # Handler deciding which processes are blacklisted.
        self.blackListHandler = blacklistHandler.blacklistHandler(processBlackList)
        # Map of per-process output file handles, keyed by "<pid>_<command>".
        self.filemap = {}
        # Provider of output directories / data file names.
        self.dirHandler = dirHandler
        # Top-n process lists (max observed value taken into account)
        # for CPU, memory, IO read and IO write respectively.
        self.cpuTopProcessList = basicStatistics.TopProcessList(processNumber)
        self.memTopProcessList = basicStatistics.TopProcessList(processNumber)
        self.ioTopRdProcessList = basicStatistics.TopProcessList(processNumber)
        self.ioTopWrProcessList = basicStatistics.TopProcessList(processNumber)

    def detectHeader(self, line):
        """
        Check whether the line is a pidstat header line.
        @param line: the line from the input file
        @return: True when the line matches the header pattern
        """
        return self.prog2.match(line) is not None

    def getMatchedDataLine(self, line):
        """
        Match the line against the pidstat data line pattern.
        @param line: the line from the input file
        @return: the match object, or None if the line is not a data line
        """
        return self.prog.match(line)

    def getLineUniqueIdentifier(self, matchedRegExp):
        """
        Return the unique identifier for a data line: "<pid>_<command>".
        "/" in the command is replaced by "_" so the id is usable as a
        file name.
        """
        comm = matchedRegExp.group(self.columns["process_name"]).replace("/", "_")
        return ''.join([matchedRegExp.group(self.columns["pid"]), "_", comm])

    def getFileDescriptor(self, name):
        """
        Return the output file handle for the given process identifier,
        creating a new file (including header) on first use.
        """
        if name not in self.filemap:
            self.filemap[name] = self.createNewFile(name)
        return self.filemap[name]

    def createNewFile(self, name):
        """Create a new per-process output file and write the header into it."""
        filename = self.dirHandler.getDataFileName(name)
        self.log.debug("Creating new file : " + filename)
        fh = open(filename, "w")
        fh.write(self.header)
        return fh

    def checkMaxCpu(self, value, id):
        self.cpuTopProcessList.checkHeap(value, id)

    def checkMaxMem(self, value, id):
        self.memTopProcessList.checkHeap(value, id)

    def checkMaxIoRead(self, value, id):
        self.ioTopRdProcessList.checkHeap(value, id)

    def checkMaxIoWrite(self, value, id):
        self.ioTopWrProcessList.checkHeap(value, id)

    def processDataLineRegExp(self, matchedRegExp):
        """
        Process one matched data line: update the top-n statistics and
        append the raw line to the per-process output file.  Blacklisted
        processes only feed the blacklist CPU/memory statistics.
        """
        id = self.getLineUniqueIdentifier(matchedRegExp)

        if not self.blackListHandler.processItem(id):
            fileHandle = self.getFileDescriptor(id)

            self.checkMaxCpu(matchedRegExp.group(self.columns["cpu"]), id)
            self.checkMaxMem(matchedRegExp.group(self.columns["memory"]), id)
            self.checkMaxIoRead(matchedRegExp.group(self.columns[r"kB_rd\/s"]), id)
            # BUG FIX: the kB_wr/s value was previously fed into
            # checkMaxIoRead a second time, so the IO-write top list
            # was never populated.
            self.checkMaxIoWrite(matchedRegExp.group(self.columns[r"kB_wr\/s"]), id)

            # "\n" instead of os.linesep: the file is opened in text
            # mode, which already performs newline translation.
            fileHandle.write(matchedRegExp.group(0) + "\n")
        else:
            self.blackListHandler.addNewCpuItem(id, matchedRegExp.group(self.columns["cpu"]))
            self.blackListHandler.addNewMemItem(id, matchedRegExp.group(self.columns["memory"]))

    def closeAllFiles(self):
        """Flush and close all per-process output files."""
        self.log.debug("About to close all files")
        for key in self.filemap:
            self.log.debug("Closing file for key: " + key)
            self.filemap[key].flush()
            self.filemap[key].close()
        self.log.info(self.blackListHandler.getCpuStatistics())

    def parseFile(self, filename, outFile):
        """Parse the pidstat file, then close all per-process output files."""
        StatParser.parseFile(self, filename, outFile)
        self.closeAllFiles()

    def getTopCpuProcessList(self):
        return self.cpuTopProcessList.getProcessList()

    def getTopMemProcessList(self):
        return self.memTopProcessList.getProcessList()

    def getFileMap(self):
        return self.filemap

    def getBlacklistStatistics(self):
        return self.blackListHandler.getStatistics()

    def getBlacklistCpuStatistics(self):
        return self.blackListHandler.getCpuStatistics()

    def getBlacklistMemStatistics(self):
        return self.blackListHandler.getMemStatistics()

class SarCpuParser(StatParser):
    """
    Parser for the CPU statistics produced by the 'sar' command.
    Accumulates a BasicStatistics object per column of interest over
    all data lines of the input file.
    """

    # Column names of interest; "total" is derived as 100 - %idle.
    keys = ["user", "system", "iowait", "total"]
    # Regex group index (in line_pattern) holding each key's value;
    # "total" points at the %idle column it is derived from.
    columns = { keys[0]: 3,
                keys[1]: 5,
                keys[2]: 6,
                keys[3]: 8
              }

    # Data line: timestamp, CPU identifier, then six percentage columns.
    line_pattern = r"([0-9:]+) +([a-zA-Z0-9]+) +([0-9\.]+) +([0-9\.]+) +([0-9\.]+) +([0-9\.]+) +([0-9\.]+) +([0-9\.]+)"
    # Header line of the CPU sar output file.
    header_pattern = r"[0-9:]+ +CPU +\%user +\%nice +\%system +\%iowait +\%steal +\%idle"

    def __init__(self):
        StatParser.__init__(self)
        # One BasicStatistics accumulator per column of interest.
        self.statistics = {}
        for key in self.keys:
            self.statistics[key] = basicStatistics.BasicStatistics()
        # Compiled data-line and header-line patterns.
        self.prog = re.compile(self.line_pattern)
        self.prog2 = re.compile(self.header_pattern)

    def getMatchedDataLine(self, line):
        """Return the data-line match object, or None for non-data lines."""
        return self.prog.match(line)

    def detectHeader(self, line):
        """Return True when the line is a sar CPU header line."""
        return self.prog2.match(line) is not None

    def processDataLineRegExp(self, matchedRegExp):
        """
        Add the value of every column of interest to its statistics
        accumulator.  "total" is computed as 100 minus the %idle column.
        """
        for key in self.keys:
            value = float(matchedRegExp.group(self.columns[key]))
            if key == "total":
                value = 100 - value
            self.statistics[key].addSample(value)

    def getStatistics(self):
        """Return the dictionary of per-column BasicStatistics."""
        return self.statistics
    
class SarMemParser(SarCpuParser):
    """
    Class for parsing the memory statistics files produced by the 'sar'
    command.  Reuses the SarCpuParser machinery with memory-specific
    columns and patterns.

    @author: Krystian Brachmanski
    """

    # Column names of interest.  Declared at CLASS level (shadowing the
    # SarCpuParser attributes) so that the inherited constructor builds
    # the statistics dictionary and compiles the regexes with the memory
    # variants.  Previously the keys/columns were assigned only AFTER
    # SarCpuParser.__init__ had run, and line_pattern was a local
    # variable that was silently discarded, so the CPU configuration was
    # used instead and the inherited processDataLineRegExp would raise
    # KeyError (which a no-op override used to mask).
    keys = [r"memused [\%]", "kbmemfree", "kbbuffers", "kbcached"]
    # Regex group index (in line_pattern) holding each key's value.
    columns = { keys[0]: 4,
                keys[1]: 2,
                keys[2]: 5,
                keys[3]: 6
              }

    # Data line: timestamp followed by the numeric memory columns; the
    # trailing optional group covers sysstat versions that also report
    # kbactive/kbinact.
    line_pattern = r"([0-9:]+) +([a-zA-Z0-9]+) +([0-9\.]+) +([0-9\.]+) +([0-9\.]+) +([0-9\.]+)( +([0-9\.]+) +([0-9\.]+))*"
    # Header line of the memory sar output file.
    header_pattern = r"[0-9:]+ +kbmemfree +kbmemused +\%memused +kbbuffers +kbcached +kbcommit +%commit( +kbactive +kbinact)*"

    def __init__(self):
        """
        Constructor.  All configuration comes from the class attributes
        above; the inherited constructor does the rest.
        """
        SarCpuParser.__init__(self)
