#!/usr/bin/python

import sys
import os
import glob
from math import sqrt,pow
PlotConfig= '../Plot/'
sys.path.append(PlotConfig)

#######################################################################
## A simple script to calculate results
## from log_eff files
## It is embedded in this analysis package
## i) new feature: added statistical errors for MC, 10/20/2010
########################################################################

def cal_result(prefix='log_',set='NOM',sig='',output='total_results',**kw):
    """Read per-dataset cut-flow log files, combine them and write a summary table.

    Expects, relative to the current working directory:
      - 'log_format'             : one cut name per line, defining the cut ordering;
      - data*/mc* sub-folders    : each holding a log file whose name matches
                                   <prefix>*<set>* and whose lines look like
                                   'cutname=value+/-error';
      - '../Plot/plot_configure' : parsed by plot_config() into dict_info, which is
                                   assumed to provide 'Datasets', 'IntLumi', 'MCScale'
                                   and per-dataset/per-MCID bookkeeping (cross
                                   sections, filter efficiencies, k-factors, ...)
                                   -- TODO confirm the exact schema against plot_config.

    Parameters:
      prefix -- leading part of the log file names globbed for (default 'log_').
      set    -- systematics tag; used both to match log file names and as the name
                of the output subdirectory.  NOTE: shadows the builtin 'set'; kept
                because it is part of the keyword interface.
      sig    -- name of the signal dataset; it is excluded from the 'Bkg' sum and
                used for the s/b column.
      output -- name of the summary file written inside ./<set>/.
      **kw   -- unused; accepted so extra keywords from callers are tolerated.

    Side effects: writes 'list_dataset_folder' and '<set>/<output>', creates the
    <set> directory if needed, mutates dict_info in place, and chdir's into <set>
    and back.  Calls sys.exit(1) on any missing or unreadable input.
    """

    ##################################################
    # Grab the cut flow steps from file 'log_format'
    ##################################################
    cuts=[]
    # 'TotalWeighted' is prepended so it becomes the first row of the output
    # table; it is filled later from normalization info, not from the log files.
    cuts.append('TotalWeighted')
    try: f1=open('log_format','r')
    except IOError:
        print 'ERROR ==> This file can not be open log_format !!!'
        sys.exit(1) 
    # NOTE(review): f1 is never closed; harmless for a short script but worth fixing.
    for line in f1.readlines():
        # Strip a single trailing newline by hand (pre-rstrip idiom).
        if line[-1]=='\n': line_re=line[:-1]
        else: line_re=line
        cuts.append(line_re)
    print 'INFO ==> Get the cuts are: ',cuts

    ###########################################################
    # make all the dataset folders we want to use in a list ###
    ###########################################################
    # This listing file is only written for bookkeeping/debugging; the folders
    # actually read below come from the parsed plot config, not from this file.
    try: input_dataset_list=open('list_dataset_folder','w')
    except IOError:
        print 'ERROR ==> Can not open file "list_dataset_folder" to write'
        sys.exit(1)    
    for folder in os.listdir('.'):
        # Only directories whose names contain 'data' or 'mc' are dataset folders.
        if not 'data' in folder and not 'mc' in folder: continue
        if os.path.isdir(folder): input_dataset_list.write('%s\n' % folder)
    input_dataset_list.close()
    print 'INFO ==> Successfully put all data set folder into "list_dataset_folder"'

    #########################################################
    # parse the config file, return all info in dict_info ### 
    #########################################################
    # Get Path for plot config file
    #config_path=os.environ['PATH_PlotConfig']
    #config_file='%s/plot_configure' % config_path
    config_file='../Plot/plot_configure'
    if not os.path.isfile(config_file):
        print 'ERROR ==> File "%s" expected, but not exist !!!' % config_file
        sys.exit(1)
    # Parse config file (plot_config is found via the sys.path entry added at
    # the top of this file).
    from plot_config import plot_config
    dict_info=plot_config(configfile=config_file)
    # Get Dataset List from dict_info
    datasets=dict_info['Datasets']
    # Integrated luminosity; presumably in fb^-1 given the *1000 conversion
    # in the MC scale below -- TODO confirm units in plot_configure.
    lumi=float(dict_info['IntLumi'])

    ########################################################
    # Read Cut flows and add them into dict_info as well ###
    # For MC should do the normalization at the meantime ###
    ########################################################
    print 'INFO ==> Read Cut flows'
    # Loop datasets one by one, 
    for dataset in datasets:
        # Any dataset whose name does not contain 'Data' is treated as MC.
        ismc=0
        if not 'Data' in dataset: ismc=1
        # Get the cut flows for each file inside one dataset
        for this_dataset in dict_info[dataset].keys():
            # Skip the non-dataset bookkeeping keys stored at the same level.
            if this_dataset=='Color' or this_dataset=='Corr' or this_dataset=='OverLay': continue                   
            # for data, here should be directly foldername; for mc, still sub structure exist, MCID: foldernames...
            # here make a list of all the folders,
            list_folder=[]
            if ismc==0: list_folder.append(this_dataset)
            if ismc==1:
                for subfolder in dict_info[dataset][this_dataset].keys():
                    list_folder.append(subfolder) 
            # variables to store the corr factors for mc
            # d3pd_weighted accumulates over all folders of one MCID, while
            # xs/fe/k_factor/corr are simply overwritten each iteration -- i.e.
            # the last folder's values win.  NOTE(review): this assumes all
            # folders of one MCID share the same xs/fe/k-factor -- confirm.
            global_corr,this_corr,xs,fe,k_factor,d3pd_weighted=0.,0.,0.,0.,0.,0. 
            # loop the folders list one by one and merge results  
            dict_temp={} 
            for folder in list_folder:
                # Get the log_file name we want to retrieve cut flows
                logfiles=glob.glob(''.join([folder,'/',prefix,'*',set,'*']))
                if not logfiles:
                    print 'ERROR ==> Not log file match pattern %s in %s !' % (prefix,folder)
                    sys.exit(1)
                # If several files match, only the first glob hit is used.
                logfile=logfiles[0]
                # open log file
                try: flog=open(logfile,'r')
                except IOError:
                    print 'ERROR ==> This file can not be open %s !!!' % logfile
                    sys.exit(1)
                # save cut flow into a temp dict
                # NOTE(review): flog is never closed; relies on GC/interpreter exit.
                for this_line in flog.readlines():
                    # get cut number
                    if this_line[-1]=='\n': line=this_line[:-1]
                    else: line=this_line
                    # Expected line format: 'cutname=value+/-error'.
                    line_info=line.split('=')
                    if len(line_info)<2:
                        print 'ERROR ==> %s -> %s -> %s: empty cut, "%s"' % (dataset,folder,logfile,line)
                        continue
                    #cut,number=line_info[0],float(line_info[1])
                    # NOTE(review): 'str' shadows the builtin of the same name
                    # for the rest of this loop body.
                    cut,str=line_info[0],line_info[1]
                    var=float(str.split('+/-')[0])
                    err=float(str.split('+/-')[1])
                    
                    # Merge across folders: sum the counts, combine the
                    # statistical errors in quadrature.
                    if not dict_temp.has_key(cut): dict_temp[cut]={}
                    if not dict_temp[cut].has_key('var'): dict_temp[cut]['var']=var
                    else: dict_temp[cut]['var']+=var
                    if not dict_temp[cut].has_key('err'): dict_temp[cut]['err']=err
                    #else: dict_temp[cut]['err']+= sqrt(pow(dict_temp[cut]['err'],2) + pow(err,2))
                    else: dict_temp[cut]['err']= sqrt(pow(dict_temp[cut]['err'],2) + pow(err,2))
                    
                # get info for mc corr factors  
                print folder 
                if ismc==1:
                    # Skip folders without bookkeeping info (silent best-effort).
                    if not dict_info[dataset][this_dataset][folder].has_key('Nevts(Weight)'): continue
                    # accumulater total weighted events for mcs with same mcid
                    d3pd_weighted+=float(dict_info[dataset][this_dataset][folder]['Nevts(Weight)'])
                    if d3pd_weighted<=0.:
                        print 'ERROR ==> D3PD Weighted Entry <=0 ?'
                        sys.exit(1)
                    xs=float(dict_info[dataset][this_dataset][folder]['Xsection(pb)'])
                    fe=float(dict_info[dataset][this_dataset][folder]['FilterEff(gen)'])
                    k_factor=float(dict_info[dataset][this_dataset][folder]['k-factor'])
                    global_corr=float(dict_info['MCScale'])
                    this_corr=float(dict_info[dataset]['Corr'])
            # caculate statistic errors, normalize mc events and add info into dict_info
            for cut in dict_temp.keys():
                number,error,scale=dict_temp[cut]['var'], dict_temp[cut]['err'],1.0
                #if number>0: error=1.0/sqrt(number)
                if ismc==1: 
                    # Normalize MC to data luminosity:
                    # lumi [fb^-1]*1000 -> pb^-1, times xs [pb] * filter eff *
                    # k-factor * global and per-dataset correction factors,
                    # divided by the total generated weighted events.
                    scale=lumi*1000*xs*fe*k_factor*global_corr*this_corr/d3pd_weighted
                    dict_temp[cut]['var']=number*scale
                # The statistical error is scaled for data too (scale=1.0 there).
                dict_temp[cut]['err']=error*scale

            # Fill the synthetic 'TotalWeighted' row: generated weighted events
            # for MC, and the first real cut's yield for data (cuts[0] is
            # 'TotalWeighted' itself, so cuts[1] is the first cut from log_format).
            dict_temp['TotalWeighted']={}
            if ismc==1:
                dict_temp['TotalWeighted']['var']=d3pd_weighted
                dict_temp['TotalWeighted']['err']=sqrt(d3pd_weighted)
            else: 
                dict_temp['TotalWeighted']['var']=dict_temp[cuts[1]]['var']
                dict_temp['TotalWeighted']['err']=sqrt(dict_temp[cuts[1]]['var'])
            dict_info[dataset][this_dataset]['CutFlow']= dict_temp

    #####################
    # Combine CutFlows ## 
    #####################
    print 'INFO ==> Combine cut flows'
    dict_cutflow={}
    for dataset in datasets:
        # Sum all sub-datasets of this dataset into dict_temp
        # (counts added, errors combined in quadrature).
        dict_temp={}
        for this_dataset in dict_info[dataset].keys():
            if this_dataset=='Color' or this_dataset=='Corr' or this_dataset=='OverLay': continue
            for key in dict_info[dataset][this_dataset]['CutFlow'].keys():
                this_dict=dict_info[dataset][this_dataset]['CutFlow'][key]
                value=this_dict['var']
                error=this_dict['err']
                if dict_temp.has_key(key): 
                    dict_temp[key]['var']+=value
                    dict_temp[key]['err']=sqrt(pow(dict_temp[key]['err'],2)+pow(error,2))
                else:
                    dict_temp[key]={}
                    dict_temp[key]['var']=value
                    dict_temp[key]['err']=sqrt(error*error)
        dict_cutflow[dataset]=dict_temp
        # to sum mc 
        # NOTE(review): the dataset being processed when 'MC' is first created
        # is NOT added to the sum (the else-branch is skipped).  This only gives
        # the intended result if the first dataset in 'datasets' is the Data one,
        # as the inline comment implies -- confirm ordering in plot_configure.
        if not dict_cutflow.has_key('MC'):  dict_cutflow['MC']={} # data will be ignored here, for dataset="Data" here
        else:
            for key in dict_temp.keys():
                if dict_cutflow['MC'].has_key(key):
                    dict_cutflow['MC'][key]['var']+=dict_temp[key]['var']
                    dict_cutflow['MC'][key]['err']=sqrt(dict_cutflow['MC'][key]['err']*dict_cutflow['MC'][key]['err']\
                                                          +dict_temp[key]['err']*dict_temp[key]['err'])
                else:
                    dict_cutflow['MC'][key]={}
                    dict_cutflow['MC'][key]['var']=dict_temp[key]['var']
                    dict_cutflow['MC'][key]['err']=sqrt(dict_temp[key]['err']*dict_temp[key]['err'])        
        # to sum mc background
        # Same first-dataset-skipped pattern as the 'MC' sum above; the signal
        # dataset (sig) is excluded so 'Bkg' holds only backgrounds.
        if dataset!=sig:
            if not dict_cutflow.has_key('Bkg'):  dict_cutflow['Bkg']={}
            else: 
                for key in dict_temp.keys():
                    if dict_cutflow['Bkg'].has_key(key): 
                        dict_cutflow['Bkg'][key]['var']+=dict_temp[key]['var']
                        dict_cutflow['Bkg'][key]['err']=sqrt(dict_cutflow['Bkg'][key]['err']*dict_cutflow['Bkg'][key]['err']\
                                                          +dict_temp[key]['err']*dict_temp[key]['err'])
                    else: 
                        dict_cutflow['Bkg'][key]={}
                        dict_cutflow['Bkg'][key]['var']=dict_temp[key]['var']
                        dict_cutflow['Bkg'][key]['err']=sqrt(dict_temp[key]['err']*dict_temp[key]['err'])
    
    ###################################################################
    # check if the dict_cutflow to see if all things are properly done
    ###################################################################
    # A combined entry with no cut keys means no sub-dataset contributed to it.
    print 'INFO ==> Check the combined cutflow:'
    for key in dict_cutflow.keys():
         is_abnormal=1
         for key1 in dict_cutflow[key].keys():
             if key1=='Color' or key1=='Corr' or key1=='OverLay': continue
             is_abnormal=0
         if is_abnormal==1: print '     ==> ERROR: No DataSet loaded for "%s", please check your setting' % key

    #####################################################################
    # Output results, using the combined dictionary
    #####################################################################
    # make diretories to write into log file
    if not os.path.isdir(set): os.mkdir(set)
    os.chdir(set)

    print 'INFO ==> Now print out CutFlows into file: "%s"' % output
    # extend List 'datasets' by add items 'Bkg'
    # NOTE(review): this mutates the list held inside dict_info; if cal_result
    # were called twice with the same config, 'Bkg' would be appended twice.
    datasets.append('Bkg')
    # open the output file 
    try: fout=open(output,'w')
    except IOError:
        print 'ERROR ==> This file can not be open %s !!!' % output
        sys.exit(1)   
    # write cut sequence and data set sequence
    fout.write('%-20s' % 'cutflow')
    for i in datasets: fout.write('%-14s' % i)
    fout.write('%-14s' % 'Data/MC')
    fout.write('%-14s\n' % 's/b')
    
    for cut in cuts:
        # One line for cut number
        fout.write('%-20s' % cut)    
        for dataset in datasets:
            # Empty combined entry: print a placeholder instead of a number.
            if not dict_cutflow[dataset]: fout.write('%-14s' % 'XXXX')
            else:
                cutnumber=dict_cutflow[dataset][cut]['var']
                fout.write('%-14.7e' % cutnumber)

        # NOTE(review): the Data/MC and s/b columns are only written when their
        # conditions hold, so rows can have fewer columns than the header --
        # consumers of this table must tolerate that (or this wants a fix).
        dataovermc=0
        if dict_cutflow['MC'] and dict_cutflow['Data'] and dict_cutflow['MC'][cut]['var'] !=0:
            dataovermc=dict_cutflow['Data'][cut]['var']/dict_cutflow['MC'][cut]['var']
            fout.write('%-14.7e' % dataovermc) 

        sigoverbg=0
        if dict_cutflow['Bkg'] and dict_cutflow[sig] and dict_cutflow['Bkg'][cut]['var'] !=0:
            sigoverbg=dict_cutflow[sig][cut]['var']/dict_cutflow['Bkg'][cut]['var']
            fout.write('%-14.7e' % sigoverbg)
        fout.write('\n')
        # One line for statistic errors
        fout.write('%-20s' % '+/-')
        for dataset in datasets:
            if not dict_cutflow[dataset]: fout.write('%-14s' % 'XXXX')
            else: 
                cuterror=dict_cutflow[dataset][cut]['err']
                fout.write('%-14.7e' % cuterror)
        fout.write('\n')

    # write the cut efficiency into output too, relevant eff
    # Each efficiency is relative to the previous cut in 'cuts'.
    fout.write('\n')
    fout.write('\n')
    fout.write('%-20s' % 'cuteff')
    for i in datasets: fout.write('%-12s' % i)
    fout.write('%-12s\n' % 's/b')

    for i in range(len(cuts)):
        fout.write('%-20s' % cuts[i])
        for dataset in datasets:
            cutnumber_this,cutnumber_pre=1,1
            cut_eff=0.
            # The first three rows are defined as 100% efficient by convention
            # -- presumably TotalWeighted plus two bookkeeping cuts; confirm
            # against log_format.
            if i==0 or i==1 or i==2: cut_eff=1.
            else:
                if dict_cutflow[dataset]: 
                    cutnumber_this=dict_cutflow[dataset][cuts[i]]['var']
                    cutnumber_pre=dict_cutflow[dataset][cuts[i-1]]['var']
                # Guard against division by zero when the previous cut is empty.
                if cutnumber_pre: cut_eff=cutnumber_this/cutnumber_pre
            fout.write('%-10.4e  ' % cut_eff)
        fout.write('\n')

    # done
    print 'INFO ==> All done!'
    fout.close()

    # change dir to parent directory
    os.chdir("../")

