#!/usr/bin/env python

###################################################################################################
#
# MODULE:	m.fuzzy.validation
# AUTHOR:	Thomas Leppelt
# PURPOSE:      Selection, calibration and validation of parameter combinations for fuzzy models.
#
###################################################################################################

#%module
#% description: Selection, calibration and validation of parameter combinations for fuzzy models.
#% keywords: Miscellaneous
#% keywords: Fuzzy model,TAG2E
#%end
#%option G_OPT_F_INPUT
#% description: Name of input vector map for fuzzy model input
#% required : yes
#% guisection: Files & format
#%end
#%option G_OPT_F_OUTPUT
#% description: Name of output vector file
#% required : no
#% guisection: Files & format
#%end
#%option
#% key: target
#% type : string
#% description: Select name for target variable
#% required : yes
#% multiple: no
#% guisection: Files & format
#%end
#%option
#% key: weightfactor
#% type : string
#% description: Select name for weighting variable
#% required : no
#% multiple: no
#% guisection: Files & format
#% answer: None
#%end
#%option
#% key: splitfactor
#% type : string
#% description: Select factor name for dataset separation
#% required : no
#% multiple: no
#% guisection: Files & format
#%end
#%option
#% key: samplingfactor
#% type : string
#% description: The name of the column with ids for bootstrap aggregation selection
#% required : no
#% multiple: no
#% guisection: Files & format
#% answer: None
#%end
#%option
#% key: parameters
#% type : string
#% description: List of driving parameters in input attribute table for selection  
#% required : yes
#% multiple: yes
#% guisection: Files & format
#%end
#%option
#% key: uncertainties
#% type : double
#% description: The uncertainty of parameters in same order to be used for monte carlo simulation
#% required : no
#% multiple: yes
#% guisection: Files & format
#%end
#%option
#% key: rulelimit
#% type : double
#% description: The rule limit specifies the minimum percentage of data that should be located in a single rule. 
#% required : no
#% multiple: no
#% guisection: Files & format
#% answer: 20
#%end
#%option
#% key: ensembles
#% type : integer
#% description: Number of model ensembles extracted by bootstrapping
#% required : no
#% multiple: no
#% guisection: Files & format
#% answer: 50
#%end
#%option
#% key: iterations
#% type : integer
#% description: The maximum number of iterations
#% required : no
#% multiple: no
#% guisection: Files & format
#% answer: 50000
#%end
#%option
#% key: fuzzysets
#% type : integer
#% description: The number of fuzzy sets to be used for calibration for each factor: in case of 4 factors each of 2 fuzzy sets: 2,2,2,2
#% required : no
#% multiple: yes
#% guisection: Files & format
#%end
#%flag
#% key: a
#% description: Use all parameters in input attribute table for calibration
#%end
#%flag
#% key: b
#% description: Use bootstrap aggregation (bagging) for input data selection
#%end
#%flag
#% key: e
#% description: Activate crossvalidation by given splitfactor
#%end
#%flag
#% key: c
#% description: Transform categorical variable to numerical weight factor for selection 
#%end
#%flag
#% key: w
#% description: Using weight factor for calibration
#%end
#%flag
#% key: m
#% description: Activate Monte Carlo simulation for uncertainty estimation
#%end

import sys
import os
import grass.script as grass
import numpy
import subprocess
import multiprocessing as multi
import re
import csv
import string
from vtk import *
from libvtkTAG2ECommonPython import *
from libvtkTAG2EFilteringPython import *
from libvtkGRASSBridgeIOPython import *
from libvtkGRASSBridgeCommonPython import *
from libvtkGRASSBridgeRSpaceTimePython import *
#from libvtkGRASSBridgeTemporalPython import *
import MetaModel

workers = multi.cpu_count()

def ParameterXMLRange(input, weightfactor = 'None'): 
  # Read a fuzzy model XML description file and return a dictionary that maps
  # each factor name to its [max, min] calibration range (attribute strings).
  #
  # input        -- path of the model XML representation file
  # weightfactor -- 'None' for plain fuzzy models; any other value means the
  #                 factor elements live one level deeper, inside the second
  #                 nested element of the root (the weighted-model layout).
  #
  # Use a context manager so the file is closed even if reading fails.
  with open(input, 'r') as fuzzyxml:
    fuzzylist = fuzzyxml.read()
  fuzzymodel = vtkXMLUtilities.ReadElementFromString(fuzzylist)
  if weightfactor == 'None':
    container = fuzzymodel
  else:
    # Weighted models wrap the fuzzy inference scheme in a second child.
    container = fuzzymodel.GetNestedElement(1)
  # NOTE(review): the last nested element is deliberately skipped
  # (range(n - 1)) — presumably it is the response/weight element; confirm.
  element_number = container.GetNumberOfNestedElements()
  factor_dict = {}
  for i in range(element_number - 1):
    element = container.GetNestedElement(i)
    element_name = element.GetAttribute('name')
    element_max = element.GetAttribute('max')
    element_min = element.GetAttribute('min')
    factor_dict[element_name] = [element_max,element_min]
  return(factor_dict)

def ParameterComparisonFlag(index, xmldict, parameters, parameterdict):
  # Compute flags for validation data quality.
  #
  # index         -- key of the record to check in parameterdict
  # xmldict       -- mapping factor name -> [max, min] range strings
  #                  (as returned by ParameterXMLRange)
  # parameters    -- comma separated factor names, in the same order as the
  #                  value tuple stored in parameterdict[index]
  # parameterdict -- mapping record key -> sequence of parameter value strings
  #
  # Possible return values:
  #  0: At least one validation parameter is outside the model calibration 
  #     range and no missing values is detected.
  #  1: Validation parameters are inside the model calibration range and no
  #     missing values are detected.
  #  2: At least one validation parameter is a missing values and no outliers
  #     are detected.
  #  3: At least one validation parameter is a missing values and one outlier
  #     is detected.
  na_value = 9999  # sentinel marking missing data in the attribute tables
  parameter_list = parameters.split(',')
  values = parameterdict[index]
  flag_list = []
  for para_index, para in enumerate(parameter_list):
    # Calibration range of this factor; avoid shadowing builtins max/min.
    upper = float(xmldict[para][0])
    lower = float(xmldict[para][1])
    value = float(values[para_index])
    if value == na_value:
      flag = 2          # missing value
    elif value > upper or value < lower:
      flag = 0          # outlier: outside the calibration range
    else:
      flag = 1          # inside the calibration range
    flag_list.append(flag)
  # Aggregate the per-parameter flags once, after the loop (the original
  # recomputed this on every iteration with the same final result).
  outlier_values = sum(f < 1 for f in flag_list)
  missing_values = sum(f > 1 for f in flag_list)
  if missing_values == 0 and outlier_values != 0:
    validation_flag = 0
  elif missing_values != 0 and outlier_values == 0:
    validation_flag = 2
  elif missing_values != 0 and outlier_values != 0:
    validation_flag = 3
  else:
    validation_flag = 1
  return(validation_flag)

#m.fuzzy.validation input=moorprojekt_ghg_europe_ecad splitfactor=landuse target=n2oaverage parameters=wt,cn20,ninput,tg_annual fuzzysets=3,2,2,3 iterations=1000 ensembles=2 -e
#m.fuzzy.validation input=moorprojekt_ghg_europe_ecad_weight splitfactor=landuse target=n2oaverage parameters=wt,cn20,ninput,tg_annual fuzzysets=3,2,2,3 iterations=1000 ensembles=2 -e

def MonteCarloSim(key, parameters, parametername, resultvalidationdictpara, 
                  uncertainties, na_value = 9999, weightfactor = 'None', 
                  weightnum = 0):
  # Application of Monte Carlo Simulation for uncertainty estimation of model results.
  #
  # key                      -- record key into resultvalidationdictpara
  # parameters               -- comma separated factor names
  # parametername            -- file name of the calibrated fuzzy model XML representation
  # resultvalidationdictpara -- mapping: record key -> tuple of parameter value strings
  # uncertainties            -- comma separated standard deviations per parameter (may be '')
  # na_value                 -- sentinel value marking missing parameter values
  # weightfactor             -- weighting variable name, 'None' to disable weighting
  # weightnum                -- value of the weighting variable for this record
  #
  # Returns a list with the 0, 5, 25, 50, 75, 95 and 100 % quantiles of the
  # simulated model result distribution (computed in R via the VTK R interface).

  # Set up dummy data points to select object type.
  points = vtkPoints()
  for i in range(5):
      for j in range(5):
        points.InsertNextPoint(i, j, 0)
  data = vtkDoubleArray()
  data.SetNumberOfTuples(25)
  data.SetName("data")
  data.FillComponent(0,3)
  ds = vtkPolyData()
  ds.Allocate(5,5)
  ds.GetPointData().SetScalars(data)
  ds.SetPoints(points)

  # Import fuzzy model XML representation file as object.
  fuzzyxml = open(parametername, 'r')
  fuzzylist = fuzzyxml.read()
  fuzzyxml.close()

  fuzzymodel = vtkXMLUtilities.ReadElementFromString(fuzzylist)
  if weightfactor == 'None':
    # Plain fuzzy inference model without weighting.
    fisc = vtkTAG2EFuzzyInferenceModelParameter()
    fisc.SetXMLRepresentation(fuzzymodel)
    model = vtkTAG2EFuzzyInferenceModel()
    model.SetModelParameter(fisc)
  else:
    fisc = vtkTAG2EFuzzyInferenceModelParameter()
    weight = vtkTAG2EWeightingModelParameter()
    
    # Weighted models store the weighting scheme and the fuzzy inference
    # scheme as the first and second nested XML elements respectively.
    weight_model = fuzzymodel.GetNestedElement(0)
    # NOTE(review): the two GetName() return values below are discarded —
    # looks like leftover debugging; confirm before removing.
    weight_model.GetName()
    fisc_model = fuzzymodel.GetNestedElement(1)
    fisc_model.GetName()
    fisc.SetXMLRepresentation(fisc_model)
    weight.SetXMLRepresentation(weight_model)

    model = vtkTAG2EFuzzyInferenceModel()
    model.SetModelParameter(fisc)   
    
    modelW = vtkTAG2EWeightingModel()
    modelW.SetModelParameter(weight)
    
    # NOTE(review): this meta model pipeline is never handed to the analyser
    # below — only 'model' is (analyser.SetModel(model)), so the weighting
    # model appears to be ignored in the simulation. Confirm intent.
    meta = MetaModel.MetaModel()
    meta.InsertModelParameter(model, fisc_model, "vtkTAG2EFuzzyInferenceModel")
    meta.InsertModelParameter(modelW, weight, "vtkTAG2EWeightingModel")
    meta.SetLastModelParameterInPipeline(modelW, weight, "vtkTAG2EWeightingModel")
    
  # Split parameter list.
  parameter_list = parameters.split(',')

  # Split parameter list.
  uncertainties_list = uncertainties.split(',')
  if len(uncertainties) != 0:
    # NOTE(review): the list result is discarded — presumably this only
    # validates that every given uncertainty is numeric (raises otherwise).
    [float(i) for i in uncertainties_list]
  
  # Parameter value tuple of the record to be simulated.
  para_num = resultvalidationdictpara[key]
  
  # Compute means, median, standard deviation for model parameters.
  para_mean = list()
  para_median = list()
  para_sd = list()
  para_val = resultvalidationdictpara.values()
  para_list = [list([float(i) for i in item]) for item in para_val]
  # Transpose to per-parameter columns and drop missing (na_value) entries.
  para_split = zip(*para_list)
  para_split_narem = [[i for i in list(item) if i != na_value] for item in para_split]
  for i in para_split_narem:
      para_mean.append(numpy.mean(i))
      para_median.append(numpy.median(i))
      para_sd.append(numpy.std(i))
  
  # Create data distribution xml representation file.
  root  = vtk.vtkXMLDataElement()
  root.SetName("DataDistributionDescription")
  root.SetAttribute("xmlns", "http://tag2e.googlecode.com/files/DataDistributionDescription")
  root.SetAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
  root.SetAttribute("xsi:schemaLocation", "http://tag2e.googlecode.com/files/DataDistributionDescription http://tag2e.googlecode.com/files/DataDistributionDescription.xsd")
  root.SetAttribute("name", "Simulation")

  # One normally distributed variable per parameter: the mean is the record's
  # own value (or the ensemble mean when the value is missing); the standard
  # deviation comes from the given uncertainties (or the ensemble spread).
  for para in parameter_list:
      para_index = parameter_list.index(para)
      dfn = vtkXMLDataElement()
      dfn.SetName("Norm")
      if (float(para_num[para_index]) == na_value):
	  mean_val = para_mean[para_index]
          if len(uncertainties) != 0:
	    sd_val = float(uncertainties_list[para_index])
	  else:
	    sd_val = para_sd[para_index]
      else:
	  mean_val = float(para_num[para_index])
          if len(uncertainties) != 0:
	    sd_val = float(uncertainties_list[para_index])
          else:
	    sd_val = para_sd[para_index]  
      dfn.SetDoubleAttribute("mean", mean_val)
      dfn.SetDoubleAttribute("sd", sd_val)
      varn = vtkXMLDataElement()
      varn.SetName("Variable")
      varn.SetAttribute("name", para)
      varn.SetAttribute("type", "norm")
      varn.AddNestedElement(dfn)
      root.AddNestedElement(varn)
      
  if weightfactor != 'None':
    # The weighting variable is held fixed: a uniform distribution with
    # min == max == weightnum.
    dfn = vtkXMLDataElement()
    dfn.SetName("Unif")
    dfn.SetDoubleAttribute("min", int(weightnum))
    dfn.SetDoubleAttribute("max", int(weightnum))
    varn = vtkXMLDataElement()
    varn.SetName("Variable")
    varn.SetAttribute("name", str(weightfactor)) 
    varn.SetAttribute("type", "unif")
    varn.AddNestedElement(dfn)
    root.AddNestedElement(varn)
    
  # Create data distribution xml file.
  ddd = vtkTAG2EAbstractModelParameter()
  ddd.SetFileName("/tmp/MCWFISSimpleDataDistributionDescription1.xml")
  ddd.SetXMLRepresentation(root)
  ddd.Write()
  
  # Create Monte Carlo analyser object.
  analyser = vtkTAG2EModelMonteCarloVariationAnalyser()
  analyser.SetDataDistributionDescription(ddd)
  analyser.SetModel(model)
  analyser.SetNumberOfRandomValues(1000)
  analyser.SetMaxNumberOfIterations(2000)
  analyser.SetBreakCriterion(0.001)
  analyser.SetInput(ds)
  analyser.Update()   
  output = analyser.GetOutput()
  output_range = output.GetFieldData().GetArray(model.GetResultArrayName()).GetRange()
  # NOTE(review): output_min/output_max are computed but never used below.
  output_min = output_range[0]
  output_max = output_range[1]
  
  # Create R interface.
  riface = vtkRInterfaceSpatial()
  riface.AssignVTKDataArrayToRVariable(output.GetFieldData().\
         GetArray(model.GetResultArrayName()), model.GetResultArrayName())
  # Save the workspace for testing
  script = "quant <- quantile(result,c(0, 0.05, 0.25, 0.5, 0.75, 0.95, 1))"
  #print script
  riface.EvalRscript(script, True)
  result = riface.AssignRVariableToVTKDataArray("quant")
  result_array = list()
  # Collect the 7 quantile values computed in R.
  for i in range(7):
    result_array.append(result.GetValue(i))

  return(result_array)

def crosscalibration(input, inputvector, validation,  parameters, uncertainties, 
                     target, ensembles, iterations, fuzzysets, weightfactor = 'None', 
                     weightnum = 0, mcsim = False, boot = False, 
                     samplingfactor = 'None', rulelimit = '20'):
  # Calibrate an ensemble of fuzzy models on 'inputvector' (in parallel, up to
  # one process per CPU), run every calibrated model on the calibration map
  # 'input' and the validation map 'validation', and aggregate the per-record
  # results (mean, sd, quality flags and — with mcsim=True — Monte Carlo
  # quantiles) across the ensemble.
  #
  # input          -- vector map used for the calibration model runs / keys
  # inputvector    -- vector map passed to v.fuzzy.calibrator
  # validation     -- vector map used for the validation model runs
  # parameters     -- comma separated factor names
  # uncertainties  -- comma separated sd values for the Monte Carlo simulation
  # target         -- name of the target column
  # ensembles      -- number of ensemble members (string or int)
  # iterations     -- maximum number of calibration iterations
  # fuzzysets      -- comma separated fuzzy set counts per factor
  # weightfactor   -- weighting column name or 'None'
  # weightnum      -- number of weighting classes
  # mcsim          -- run the Monte Carlo uncertainty estimation per record
  # boot           -- use bootstrap aggregation in the calibrator
  # samplingfactor -- id column for bootstrap selection or 'None'
  # rulelimit      -- minimum percentage of data per fuzzy rule
  #
  # Returns 6 dictionaries keyed by category (13 when mcsim is True).
  #
  # NOTE(review): the next four assignments are dead — the first two names
  # are immediately rebound to dicts two lines below.
  resultlistcalibration = []
  resultlistvalidation = []
  execstringcalibration = ''
  execstringvalidation = ''
  resultlistcalibration = {}
  resultlistvalidation = {}
  
  inputtable = grass.read_command('v.db.select', map = input, columns = 'cat', \
  flags = 'c', sep= ';')
  inputtable = re.split('\n',inputtable.strip('\n'))
  emptyinputdict = {}
  # fromkeys shares ONE empty list object across all keys; this is harmless
  # here because values are always rebound (d[k] = d[k] + [...]), never
  # mutated in place.
  resultcalibrationdictcomplete = emptyinputdict.fromkeys(inputtable, [])
  resultvalidationdictcomplete = emptyinputdict.fromkeys(inputtable, [])
  resultvalidationdictflagscomplete = emptyinputdict.fromkeys(inputtable, [])
  if mcsim:
    resultvalidationdictmincomplete = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq05complete = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq25complete = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq50complete = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq75complete = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq95complete = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictmaxcomplete = emptyinputdict.fromkeys(inputtable, [])
  resultcalibrationdictmean = emptyinputdict.fromkeys(inputtable, [])
  resultvalidationdictmean = emptyinputdict.fromkeys(inputtable, [])
  resultcalibrationdictsd = emptyinputdict.fromkeys(inputtable, [])
  resultvalidationdictsd = emptyinputdict.fromkeys(inputtable, [])  
  if mcsim:
    resultvalidationdictminmean = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq05mean = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq25mean = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq50mean = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq75mean = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictq95mean = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictmaxmean = emptyinputdict.fromkeys(inputtable, [])
  calibrationproclist = []
  calibrationmodelnproclist = []
  validationmodelnproclist = []
  
  proc_count = 0
  #Parallel model runs for calibrations 
  # Each branch differs only in the flags/options passed to v.fuzzy.calibrator
  # (bootstrapping, sampling factor, weighting).
  for ensemblenum in range(int(ensembles)):
    
    parametername = str(inputvector + '_' + str(ensemblenum) + '.xml')
    logname = str(inputvector + '_' + str(ensemblenum) + '.log')
    
    if weightfactor == 'None' and samplingfactor == 'None' and boot == False:
      calibrationproclist.append(grass.start_command('v.fuzzy.calibrator', input = inputvector, \
                                 target = target, factors = parameters, iterations  = iterations, \
                                 fuzzysets = fuzzysets, parameter = parametername, log = logname, \
                                 overwrite = True, rulelimit = rulelimit))

    elif weightfactor == 'None' and samplingfactor == 'None' and boot == True:
      calibrationproclist.append(grass.start_command('v.fuzzy.calibrator', input = inputvector, \
                                 target = target, factors = parameters, iterations  = iterations, \
                                 fuzzysets = fuzzysets, parameter = parametername, log = logname, \
                                 overwrite = True, flags = 'b', rulelimit = rulelimit))
                                 
    elif weightfactor == 'None' and samplingfactor != 'None' and boot == True:
      calibrationproclist.append(grass.start_command('v.fuzzy.calibrator', input = inputvector, \
                                 target = target, factors = parameters, iterations  = iterations, \
                                 fuzzysets = fuzzysets, parameter = parametername, log = logname, \
                                 overwrite = True, samplingfactor = samplingfactor, flags = 'b', \
                                 rulelimit = rulelimit))

    elif weightfactor != 'None' and samplingfactor != 'None' and boot == True:
      calibrationproclist.append(grass.start_command('v.fuzzy.calibrator', input = inputvector, \
                                 target = target, factors = parameters, iterations  = iterations, \
                                 fuzzysets = fuzzysets, parameter = parametername, log = logname, \
                                 weightfactor = weightfactor, weightnum = weightnum, \
                                 overwrite = True, samplingfactor = samplingfactor, flags = 'wb', \
                                 rulelimit = rulelimit))                                 

    elif weightfactor != 'None' and samplingfactor == 'None' and boot == True:
      calibrationproclist.append(grass.start_command('v.fuzzy.calibrator', input = inputvector, \
                                 target = target, factors = parameters, iterations  = iterations, \
                                 fuzzysets = fuzzysets, parameter = parametername, log = logname, \
                                 weightfactor = weightfactor, weightnum = weightnum, \
                                 overwrite = True, flags = 'wb', rulelimit = rulelimit))

    else:
      calibrationproclist.append(grass.start_command('v.fuzzy.calibrator', input = inputvector, \
				 target = target, factors = parameters, iterations  = iterations, \
				 fuzzysets = fuzzysets, parameter = parametername, \
				 weightfactor = weightfactor, weightnum = weightnum, \
				 log = logname, overwrite = True, flags = 'w', rulelimit = rulelimit))
    proc_count += 1
    
    #check for maximum number of parallel processes defined by cpu count
    if (proc_count == workers) or (proc_count == int(ensembles)):
        for process in calibrationproclist:
            process.wait()
      
        #empty process list and reset counter
        calibrationproclist = []
        proc_count = 0
  
  #model evaluation and output dictionary creation for ensembles
  for ensemblenum in range(int(ensembles)):
      
      #set names for output files
      outputcalibrationname = str(inputvector + '_calibration_' + str(ensemblenum))
      outputvalidationname = str(inputvector + '_validation_' + str(ensemblenum))  
      parametername = str(inputvector + '_' + str(ensemblenum) + '.xml')
      if weightfactor == 'None':
        grass.run_command('v.fuzzy.model', input = input, parameter = parametername, \
                          output = outputcalibrationname, overwrite = True)

        grass.run_command('v.fuzzy.model', input = validation, parameter = parametername, \
                          output = outputvalidationname, overwrite = True)
      else:
	grass.run_command('v.fuzzy.model', input = input, parameter = parametername, \
                          output = outputcalibrationname, overwrite = True, flags = 'w')

        grass.run_command('v.fuzzy.model', input = validation, parameter = parametername, \
                          output = outputvalidationname, overwrite = True, flags = 'w')

      # Read back per-record results (cat;result;<parameters>) from both runs.
      resultcalibration = grass.read_command('v.db.select', map = outputcalibrationname, columns = str('cat,result,' + parameters), flags = 'c', sep= ';')
      resultvalidation = grass.read_command('v.db.select', map = outputvalidationname, columns = str('cat,result,' + parameters), flags = 'c', sep= ';')
      
      if weightfactor != 'None':
	resultweight = grass.read_command('v.db.select', map = outputvalidationname, columns = str('cat,' + weightfactor), flags = 'c', sep= ';')
        resultweight = re.split('\n|;',resultweight.strip('\n'))
        resultweightdict = dict(zip(resultweight[::2], resultweight[1::2]))
        
      resultcalibration = re.split('\n|;',resultcalibration.strip('\n'))
      resultvalidation = re.split('\n|;',resultvalidation.strip('\n'))
      
      # Each record occupies var_dist consecutive fields in the flat list:
      # cat, result, then one field per parameter.
      var_dist = 2 + len(parameters.split(','))
      parameter_validation = []
      for i in range(2, var_dist):
	  parameter_validation.append(resultcalibration[i::var_dist])
      parameter_validation = zip(*parameter_validation)

      resultcalibrationdict = dict(zip(resultcalibration[::var_dist], resultcalibration[1::var_dist]))
      resultvalidationdict = dict(zip(resultvalidation[::var_dist], resultvalidation[1::var_dist]))
      resultvalidationdictpara = dict(zip(resultcalibration[::var_dist], parameter_validation))

      factor_dict = ParameterXMLRange(parametername, weightfactor)
      
      for l in resultcalibrationdict:
	resultcalibrationdictcomplete[l] = resultcalibrationdictcomplete[l] + [resultcalibrationdict[l]]
      
      # NOTE(review): the result of this comprehension is discarded — it has
      # no effect; presumably it was meant to filter out empty entries.
      {i:j for i,j in resultcalibrationdictcomplete.items() if j != []}
      
      for l in resultvalidationdict:
        resultvalidationdictcomplete[l] = resultvalidationdictcomplete[l] + [resultvalidationdict[l]]
        validation_flag = ParameterComparisonFlag(l, factor_dict, parameters, resultvalidationdictpara)
        validation_flag_dict = {l:validation_flag}
        resultvalidationdictflagscomplete[l] = resultvalidationdictflagscomplete[l] + [validation_flag_dict[l]]
        if mcsim:
          if weightfactor == 'None':
	    mc_result = MonteCarloSim(l, parameters, parametername, resultvalidationdictpara, uncertainties)
          else:
	    mc_result = MonteCarloSim(l, parameters, parametername, resultvalidationdictpara, uncertainties, 
                                      weightfactor = weightfactor, weightnum = resultweightdict[l])
          # mc_result holds the 0/5/25/50/75/95/100 % quantiles in order.
          validation_min_dict = {l:mc_result[0]}
          validation_q05_dict = {l:mc_result[1]}
          validation_q25_dict = {l:mc_result[2]}
          validation_q50_dict = {l:mc_result[3]}
          validation_q75_dict = {l:mc_result[4]}
          validation_q95_dict = {l:mc_result[5]}
          validation_max_dict = {l:mc_result[6]}
          resultvalidationdictmincomplete[l] = resultvalidationdictmincomplete[l] + [validation_min_dict[l]]
          resultvalidationdictq05complete[l] = resultvalidationdictq05complete[l] + [validation_q05_dict[l]]
          resultvalidationdictq25complete[l] = resultvalidationdictq25complete[l] + [validation_q25_dict[l]]
          resultvalidationdictq50complete[l] = resultvalidationdictq50complete[l] + [validation_q50_dict[l]]
          resultvalidationdictq75complete[l] = resultvalidationdictq75complete[l] + [validation_q75_dict[l]]
          resultvalidationdictq95complete[l] = resultvalidationdictq95complete[l] + [validation_q95_dict[l]]
          resultvalidationdictmaxcomplete[l] = resultvalidationdictmaxcomplete[l] + [validation_max_dict[l]]

      # NOTE(review): the first comprehension's result is discarded (no-op);
      # the subsequent ones do rebind their dictionaries.
      {i:j for i,j in resultvalidationdictcomplete.items() if j != []}
      resultvalidationdictflagscomplete = {i:j for i,j in resultvalidationdictflagscomplete.items() if j != []}     
      if mcsim:
        resultvalidationdictmincomplete = {i:j for i,j in resultvalidationdictmincomplete.items() if j != []}
        resultvalidationdictq05complete = {i:j for i,j in resultvalidationdictq05complete.items() if j != []}
        resultvalidationdictq25complete = {i:j for i,j in resultvalidationdictq25complete.items() if j != []}
        resultvalidationdictq50complete = {i:j for i,j in resultvalidationdictq50complete.items() if j != []}
        resultvalidationdictq75complete = {i:j for i,j in resultvalidationdictq75complete.items() if j != []}
        resultvalidationdictq95complete = {i:j for i,j in resultvalidationdictq95complete.items() if j != []}
        resultvalidationdictmaxcomplete = {i:j for i,j in resultvalidationdictmaxcomplete.items() if j != []}
      
  # Create dicionaries with mean calibration and mean, min and max validation model result values.
  # NOTE: Python 2 semantics — map() must return a list for numpy here.
  for j in resultcalibrationdictcomplete:
    resultcalibrationdictmean[j] = resultcalibrationdictmean[j] + [numpy.mean(map(float,resultcalibrationdictcomplete[j]))]

  for j in resultvalidationdictcomplete:
    resultvalidationdictmean[j] = resultvalidationdictmean[j] + [numpy.mean(map(float,resultvalidationdictcomplete[j]))]

  for j in resultcalibrationdictcomplete:
    resultcalibrationdictsd[j] = resultcalibrationdictsd[j] + [numpy.std(map(float,resultcalibrationdictcomplete[j]))]

  for j in resultvalidationdictcomplete:
    resultvalidationdictsd[j] = resultvalidationdictsd[j] + [numpy.std(map(float,resultvalidationdictcomplete[j]))]
  if mcsim:
    for j in resultvalidationdictmincomplete:
      resultvalidationdictminmean[j] = resultvalidationdictminmean[j] + [numpy.min(map(float,resultvalidationdictmincomplete[j]))] 

    for j in resultvalidationdictq05complete:
      resultvalidationdictq05mean[j] = resultvalidationdictq05mean[j] + [numpy.mean(map(float,resultvalidationdictq05complete[j]))] 

    for j in resultvalidationdictq25complete:
      resultvalidationdictq25mean[j] = resultvalidationdictq25mean[j] + [numpy.mean(map(float,resultvalidationdictq25complete[j]))] 

    for j in resultvalidationdictq50complete:
      resultvalidationdictq50mean[j] = resultvalidationdictq50mean[j] + [numpy.mean(map(float,resultvalidationdictq50complete[j]))] 

    for j in resultvalidationdictq75complete:
      resultvalidationdictq75mean[j] = resultvalidationdictq75mean[j] + [numpy.mean(map(float,resultvalidationdictq75complete[j]))] 

    for j in resultvalidationdictq95complete:
      resultvalidationdictq95mean[j] = resultvalidationdictq95mean[j] + [numpy.mean(map(float,resultvalidationdictq95complete[j]))] 

    for j in resultvalidationdictmaxcomplete:
      resultvalidationdictmaxmean[j] = resultvalidationdictmaxmean[j] + [numpy.max(map(float,resultvalidationdictmaxcomplete[j]))]
  
  # Remove empty values from dictionaries
  # NOTE(review): every comprehension result below is discarded, so nothing
  # is actually removed; also the last condition compares against 0. while
  # the others compare against [] — confirm intent.
  {i:j for i,j in resultcalibrationdictmean.items() if j != []}
  {i:j for i,j in resultvalidationdictmean.items() if j != []}
  {i:j for i,j in resultcalibrationdictsd.items() if j != []}
  {i:j for i,j in resultvalidationdictsd.items() if j != 0.}  
  if mcsim:
    {i:j for i,j in resultvalidationdictminmean.items() if j != []}
    {i:j for i,j in resultvalidationdictq05mean.items() if j != []}
    {i:j for i,j in resultvalidationdictq25mean.items() if j != []}
    {i:j for i,j in resultvalidationdictq50mean.items() if j != []}
    {i:j for i,j in resultvalidationdictq75mean.items() if j != []}
    {i:j for i,j in resultvalidationdictq95mean.items() if j != []}
    {i:j for i,j in resultvalidationdictmaxmean.items() if j != []}
  if mcsim:
    return(resultcalibrationdictcomplete, resultvalidationdictcomplete, 
           resultcalibrationdictmean, resultvalidationdictmean, 
           resultvalidationdictsd, resultvalidationdictflagscomplete, 
           resultvalidationdictminmean, resultvalidationdictq05mean, 
           resultvalidationdictq25mean, resultvalidationdictq50mean,
           resultvalidationdictq75mean, resultvalidationdictq95mean,
           resultvalidationdictmaxmean)
  else:
    return(resultcalibrationdictcomplete, resultvalidationdictcomplete, 
           resultcalibrationdictmean, resultvalidationdictmean, 
           resultvalidationdictsd, resultvalidationdictflagscomplete)
  
def transformfactorweighting(input, weightfactor):
  # Map the categorical values of a vector attribute column to integer codes.
  #
  # Reads the distinct values of column 'weightfactor' from vector map
  # 'input', assigns each a numeric id, writes the ids into a new double
  # precision column named '<weightfactor>_num' and returns the tuple
  # (new_column_name, range_of_ids).
  #
  # Fetch the raw column values, one per line.
  raw = grass.read_command('v.db.select', map = input, columns = weightfactor, flags = 'c')
  values = raw.rstrip('\n').split('\n')
  # Deduplicate via dict.fromkeys and number the distinct keys.
  unique_keys = dict.fromkeys(values).keys()
  id_range = range(len(unique_keys))
  key_to_id = dict(zip(unique_keys, id_range))

  # Add the numeric companion column to the attribute table.
  numeric_column = str(weightfactor + '_num')
  grass.run_command('v.db.addcolumn', map = input,
                    columns = str(numeric_column + ' double precision'))
  # Fill the new column, one SQL UPDATE per distinct category value.
  for key in unique_keys:
    cmd = "UPDATE %s SET %s=%s WHERE %s='%s'" % (input, numeric_column,
                                                 key_to_id[key], weightfactor,
                                                 key)
    grass.write_command('db.execute', input = '-', stdin = cmd)

  return(numeric_column, id_range)

def splitvectordata(input, splitfactor):
    # Split a vector map into leave-one-out pairs by a categorical column.
    #
    # For every distinct value of 'splitfactor', two maps are extracted:
    # '<input>_<i>' holding all records EXCEPT that value (training part)
    # and '<input>_<i>_extract' holding ONLY that value (validation part).
    # Returns (list_of_training_maps, list_of_validation_maps).
    raw = grass.read_command('v.db.select', map = input, columns = splitfactor, flags = 'c')
    categories = dict.fromkeys(raw.rstrip('\n').split('\n')).keys()

    trainingmaps = []
    validationmaps = []

    for count, category in enumerate(categories):
      trainingname = str(input + '_' + str(count))
      validationname = str(input + '_' + str(count) + '_extract')
      # All rows except the current category -> calibration/training map.
      grass.run_command('v.extract', input = input, output = trainingname,
                        overwrite = True,
                        where = str(splitfactor + " != '" + category + "'"))
      # Only the current category -> validation map.
      grass.run_command('v.extract', input = input, output = validationname,
                        overwrite = True,
                        where = str(splitfactor + " = '" + category + "'"))
      trainingmaps.append(trainingname)
      validationmaps.append(validationname)
    return(trainingmaps, validationmaps)

#extraction and visualisation of parameters for the fuzzysets of calibrated ensembles and
def xmlparse(vinputfile, ensemblenum):
  """Extract triangular fuzzy set parameters from per-ensemble XML files
  and plot them via an embedded R script.

  For each ensemble i the file '<vinputfile>_<i>.xml' is parsed with the
  VTK XML parser.  Every triangular set found under a 'Factor' element of
  a 'FuzzyInferenceScheme' root is flattened into a CSV row; all rows are
  written to '/tmp/<vinputfile>_fuzzysets.txt', which the R script then
  renders as one PDF page per factor.

  Args:
    vinputfile:  Base name of the calibrated XML result files.
    ensemblenum: Number of ensemble members (string or int); the files
                 for ensembles 0 .. ensemblenum-1 are read.
  """
  reader = vtk.vtkXMLDataParser()
  xmlstringall = []  # CSV lines accumulated over ALL ensembles (header written once, at i == 0)
  for i in range(int(ensemblenum)):
    reader.SetFileName(str(vinputfile + '_' + str(i) + '.xml'))
    reader.Parse()
    xmltable = []  # one dict per triangular fuzzy set of this ensemble
    root = reader.GetRootElement()
    if root.GetName() == "FuzzyInferenceScheme":
      elenum = root.GetNumberOfNestedElements()
      for e in range(0,elenum,1):
        element = root.GetNestedElement(e)
        if element.GetName() == 'Factor':
          setnum = element.GetNumberOfNestedElements()
          for s in range(0,setnum):
            # NOTE(review): 'set' shadows the builtin; kept as-is.
            set = element.GetNestedElement(s)
            if set.GetAttribute('type') == 'Triangular':
              trianglenum = set.GetNumberOfNestedElements()  # currently unused
              # Only the first nested element (the triangle geometry) is read.
              triangle = set.GetNestedElement(0)
              xmldict = {}
              xmldict['ensemble'] = str(i)
              xmldict['factor'] = element.GetAttribute('name')
              xmldict['min'] = element.GetAttribute('min')
              xmldict['max'] = element.GetAttribute('max')
              xmldict['set'] = set.GetAttribute('position')
              xmldict['center'] = triangle.GetAttribute('center')
              xmldict['left'] = triangle.GetAttribute('left')
              xmldict['right'] = triangle.GetAttribute('right')
              xmltable.append(xmldict)
    # NOTE(review): 'xmlstring' is populated but never used below (dead code);
    # only 'xmlstringall' feeds the output file.
    xmlstring = []
    xmlstring.append(string.join(xmltable[0].keys(),','))
    if i == 0:
      # Header row: dict key order must match the value order used below
      # (same dict, so Python guarantees matching keys()/values() order).
      xmlstringall.append(string.join(xmltable[0].keys(),','))
    for data in xmltable:
      xmlstring.append(string.join(data.values(),','))
      xmlstringall.append(string.join(data.values(),','))
  xmlstringfinal = string.join(xmlstringall,'\n')
  #print(xmlstringfinal)
  # NOTE(review): 'file' shadows the builtin; kept as-is.
  file = open(str('/tmp/' + vinputfile + '_fuzzysets' + '.txt'),'w')
  file.write(xmlstringfinal)
  file.close()
  # Run R to visualise the fuzzy sets automatically and store the result in a PDF file.
  # NOTE(review): inside the R code below, 'minimum <- max(set$min)' looks like it
  # should be min(set$min) -- confirm before relying on the plotted x-axis lower bound.
  RScript = """ 
  PlotFuzzysets <- function(input, dir = '/tmp/') {
  # Plot Fuzzysets imported from XML Fuzzy Interference Scheme created by TAG2E.
  # An PDF document will be created.
  #
  # args:
  #  input: Name of input text file created by m.fuzzy.validation, containing 
  #         fuzzy set positions.
  #  dir:   Name of directory containing input file. Default is /tmp.

  # Import input file.
  data = read.csv(file = paste(dir, input, sep = ''), header = TRUE, sep = ",")

  # Get number of factors.
  factor_amount <- length(levels(data$factor))

  # Create pdf document.
  pdf(file = paste(substr(input, 1, nchar(input) - 4), ".pdf", sep = ''), width = 12, 
      height = 8)
  
  par(lwd = 2, cex = 1.5, mar = c(5, 5, 5, 5))

  # Loop through fuzzy sets for different factors.
  for (f in 1:length(levels(data$factor))) {

    # Create subset for each factor.
    setfactor = levels(data$factor)[f]
    set = subset(data,factor == setfactor)
    
    # Compute fuzzy set amount and position statistics for ensembles.
    
    factor_length <- nrow(subset(set, ensemble == 0, select = set))
    factor_labels <- subset(set, ensemble == 0, select = set)[,1]
    center_labels <- subset(set, ensemble == 0, select = center)[,1]
    maximum <- max(set$max)
    minimum <- max(set$min)

    if (factor_length == 2) {
      plot(1, type='n', ylim = c(0,1), xlim = c(minimum, maximum), 
           xlab = paste(setfactor), ylab = "Membership grade",
           main = paste('Fuzzysets for Factor ', setfactor))
      axis(side = 3, labels = factor_labels, at = center_labels, 
           cex.axis = 1, tick = FALSE)
      for ( i in seq(0, max(set$ensemble))) { 
        center_sub <- as.vector(subset(set, ensemble == i, select = center))
        right_sub <- subset(set, ensemble == i, select = right)
        left_sub <- subset(set, ensemble == i, select = left)
        segments(center_sub[1, 1], 1, center_sub[1, 1] + right_sub[1, 1], 0, 
                 col = 'blue')
        segments(minimum, 1, center_sub[1, 1], 1, col = 'blue')
        segments(center_sub[1, 1] + right_sub[1, 1], 0, maximum, 0, col = 'blue')
        segments(center_sub[2, 1] - left_sub[2, 1], 0, minimum, 0, col = 'green')
        segments(center_sub[2, 1] - left_sub[2, 1], 0, center_sub[2, 1], 1, col = 'green')
        segments(center_sub[2, 1], 1, maximum, 1, col = 'green')
        }      
      }
    else if (factor_length == 3) {
      plot(1, type='n', ylim = c(0, 1), xlim = c(minimum, maximum), 
           xlab = paste(setfactor), ylab = "Membership grade",
           main = paste('Fuzzysets for Factor ', setfactor))
      axis(side = 3, labels = factor_labels, at = center_labels, cex.axis = 1, tick = FALSE)
      for ( i in seq(0, max(set$ensemble))) { 
        center_sub <- subset(set, ensemble == i, select = center)
        right_sub <- subset(set, ensemble == i, select = right)
        left_sub <- subset(set, ensemble == i, select = left)
        segments(center_sub[1, 1], 1, center_sub[1, 1] + right_sub[1, 1], 0, 
                 col = 'blue')
        segments(minimum, 1, center_sub[1, 1], 1, col = 'blue')
        segments(center_sub[1, 1] + right_sub[1, 1], 0, maximum, 0, col = 'blue')
        segments(minimum, 0, center_sub[2, 1] - left_sub[2, 1], 0, col = 'red')
        segments(center_sub[2, 1] - left_sub[2, 1], 0, center_sub[2, 1], 1, 
                 col = 'red')
        segments(center_sub[2, 1], 1, center_sub[2, 1] + right_sub[2, 1], 0, 
                 col = 'red')
        segments(center_sub[2, 1] + right_sub[2, 1], 0, maximum, 0, col = 'red')
        segments(center_sub[3, 1] - left_sub[3, 1], 0, minimum, 0, col = 'green')
        segments(center_sub[3, 1] - left_sub[3, 1], 0, center_sub[3, 1], 1, 
                 col = 'green')
        segments(center_sub[3, 1], 1, maximum, 1, col = 'green')
        }
      }
    else if (factor_length == 4) {
      plot(1, type='n', ylim = c(0, 1), xlim = c(minimum, maximum), 
           xlab = paste(setfactor), ylab = "Membership grade",
           main = paste('Fuzzysets for Factor ', setfactor))
      axis(side = 3, labels = factor_labels, at = center_labels, cex.axis = 1, tick = FALSE)
      for ( i in seq(0, max(set$ensemble))) { 
        center_sub <- subset(set, ensemble == i, select = center)
        right_sub <- subset(set, ensemble == i, select = right)
        left_sub <- subset(set, ensemble == i, select = left)
        segments(center_sub[1, 1], 1, center_sub[1, 1] + right_sub[1, 1], 0, 
                 col = 'blue')
        segments(minimum, 1, center_sub[1, 1], 1, col = 'blue')
        segments(center_sub[1, 1] + right_sub[1, 1], 0, maximum, 0, col = 'blue')
        segments(minimum, 0, center_sub[2, 1] - left_sub[2, 1], 0, col = 'red')
        segments(center_sub[2, 1] - left_sub[2, 1], 0, center_sub[2, 1], 1, 
                 col = 'red')
        segments(center_sub[2, 1], 1, center_sub[2, 1] + right_sub[2, 1], 0, 
                 col = 'red')
        segments(center_sub[2, 1] + right_sub[2, 1], 0, maximum, 0, col = 'red')
        segments(center_sub[3, 1] - left_sub[3, 1], 0, center_sub[3, 1], 1, 
                 col = 'black')
        segments(center_sub[3, 1], 1, center_sub[3, 1] + right_sub[3, 1], 0, 
                 col = 'black')
        segments(center_sub[3, 1] + right_sub[3, 1], 0, maximum, 0, col = 'black')
        segments(center_sub[4, 1] - left_sub[4, 1], 0, minimum, 0, col = 'green')
        segments(center_sub[4, 1] - left_sub[4, 1], 0, center_sub[4, 1], 1, 
                 col = 'green')
        segments(center_sub[4, 1], 1, maximum, 1, col = 'green')
        }
      }
    }
  dev.off()
  }
  PlotFuzzysets(input = "inputfilename")
  """
  # Substitute the literal placeholder with the actual fuzzy set text file name.
  RScript = RScript.replace("inputfilename", str(vinputfile + '_fuzzysets' + '.txt')) 
    
  # Feed the finished script to a fresh R session over stdin.
  inputlist = ["R", "--vanilla"]
  proc = subprocess.Popen(args=inputlist, stdin=subprocess.PIPE)
  proc.stdin.write(RScript)
  proc.communicate()

def ValidationPlot(input, target, montecarloflag, parainput):
  """Create validation, calibration and (optionally) Monte Carlo plots in R.

  Reads '<input>_validation_result.csv' (written by main()) and renders PDF
  figures via the embedded R function EvalFuzzyValidation, which is run twice:
  once with missing values included and once with them removed.

  Args:
    input:          Base name of the input vector map; determines the CSV name.
    target:         Column name of the target variable in the CSV.
    montecarloflag: Truthy if Monte Carlo quantile columns (q5..q95) exist and
                    should be plotted; substituted into the R code as 0/1.
    parainput:      Comma-separated parameter names (passed through to R).

  The R script requires the Hmisc package (errbar) to be installed.
  """
  RScript = """ 
  ## Script for evaluating cross validation of fuzzy model.
  require(Hmisc)

  # Function to calculate rsquared for two vector list elements (x,y)
  rsquaredcalc <- function(x, y){
    # Compute coefficient of determination.
    totalsumofsquares = sum((x - mean(x))^2)
    residualsumofsquares = sum((x - y)^2)
    rsquared = 1 - residualsumofsquares / totalsumofsquares
    return(rsquared)
    }
  EvalFuzzyValidation <- function(file.name, target, parameters, file.dir = getwd(), 
                                  missing = TRUE, outlier = TRUE, na.value = 9999, 
                                  montecarlo = TRUE) {
    # Import valdiation file created by m.fuzzy.validation(fmta) and create 
    # calibration and valdiation plots.
    #
    # args:
    #  file.name:  Name of m.fuzzy.validation result file.
    #  target:     Column name of target variable in crossvalidation.
    #  parameters: Name of the parameters used for model calibration.
    #  file.dir:   Path to directory containing file.name.
    #  missing:    Boolean if missing values should be included. Default is TRUE.
    #  outlier:    Boolean if outlier for calibration range should be colored.
    #              Default is TRUE.
    #  na.value:   Value used as no data representation.
    #  montecarlo: Boolean if montecarlo simulation flag is set. Default is TRUE.    
  
    # Import m.fuzzy.validation result file.
    file.location <- paste(file.dir, '/', file.name, sep = '')
    validata = read.csv(file.location, header = T, sep = ",")

    # Processing data formats.
    validata[validata == na.value] = NA

    # Check if NA values should be excluded.
    if (missing == FALSE)
      validata = na.omit(validata)

    # Set axis label names for output graphics.
    ylabname <- expression(paste(N[2],'O-N  in [g' %.%  m^-2 %.% a^-1, ']', sep = ''))

    #target <- 'n2oaverage_flux'


    xlab <- expression(paste('Measured ', N[2],'O-N  in [g' %.%  m^-2 %.% a^-1, ']', 
                     sep = ''))
                   
    ylab <- expression(paste('Modelled ', N[2],'O-N  in [g' %.%  m^-2 %.% a^-1, ']', 
                     sep = ''))
    if (!is.vector(validata[[target]])) {
      target <- 'n2oaverage_log'
      xlab <- expression(paste('Measured ', N[2],'O-N  in log [g' %.%  m^-2 %.% a^-1, ']', 
                         sep = ''))
                   
      ylab <- expression(paste('Modelled ', N[2],'O-N  in log [g' %.%  m^-2 %.% a^-1, ']', 
                         sep = ''))
      }

    vali.title <- "Flux Validation Plot"
    cali.title <- "Flux Calibration Plot"
    axis.lim <- range(validata[[target]])
    leg.factors <- levels(as.factor(validata$flag_0))

    # Set color palette for plotting.
    if (outlier) {
      palette(c("orange", "black", "grey", "red"))
      } else {
      palette(c("black", "black"))
      }
      
    if (missing) {
      plot_val_name <- 'Validation_all_'
      } else {
      palette(c("black", "black"))
      plot_val_name <- 'Validation_narm_'
      }
      
    #figure.dir <- "./figures/"
    figure.dir <- file.dir

    valicolor =  factor(validata$flag_0, levels = c("0", "1", "2", "3"))

    # Create validation plot.
    pdf(file = paste(figure.dir, '/', plot_val_name, nrow(validata), '_', 
                     Sys.Date(), '.pdf', sep = ''))
  
      par(mar = c(5, 5, 4, 2), bty = 'o', tck = 0.01, cex.axis = 1.25, cex.lab = 1.25,
          cex.main = 1.25, cex = 1.25, lwd = 1.25, pch = 20, las = 1)
      plot(validata[[target]], validata$validation_mean, ylab = ylab, xlab = xlab,
           xlim = axis.lim, ylim = axis.lim, col = valicolor, 
           cex = 1, main = vali.title)
      errbar(x= validata[[target]], y = validata$validation_mean, 
             yplus = validata$validation_mean + validata$validation_sd, 
             yminus = validata$validation_mean - validata$validation_sd, 
             add = T, pch = 20, col = valicolor)
      mtext(paste('Direct comparison NSE: ', round(rsquaredcalc(validata[[target]], 
           validata$validation_mean), 2)))
      abline(0, 1)
      if (missing & outlier) {
        legend(axis.lim[1], axis.lim[2], c("Outlier", "Valide", "Missing", "Missing + Outlier") , 
               col = c("orange", "black", "grey", "red"), pch = 20)
        } else if (outlier) {
        legend(axis.lim[1], axis.lim[2], c("Outlier", "Valide") , 
               col = c("orange", "black"), pch = 20)
        }

    dev.off()

    # Set color palette for plotting.
    if (missing) {
      palette(c("black", "grey"))
      plot_cal_name <- 'Calibration_all_'
      } else {
      palette(c("black", "black"))
      plot_cal_name <- 'Calibration_narm_'
      }
    calicol <- grep(substr(file.name, 1, nchar(file.name) - 22), names(validata))
    calidata <- sapply(validata[calicol], function(x) x != validata[2])
    if (sum(calidata) != 0) {
      calimean <- rowMeans(validata[calicol][calidata[1, ]])
      calisd <- apply(validata[calicol][calidata[1, ]], MAR = 1, sd)
      } else {
      calimean <- rowMeans(validata[calicol])
      calisd <-  apply(validata[calicol], MAR=1, function(x) x = 0) 
      }
    # Create calibration plot.
    pdf(file = paste(figure.dir, '/', plot_cal_name, nrow(validata), '_', 
                     Sys.Date(), '.pdf', sep = ''))

      par(mar = c(5, 5, 4, 2), bty = 'o', tck = 0.01, cex.axis = 1.25, cex.lab = 1.25,
      cex.main = 1.25, cex = 1.25, lwd = 1.25, pch = 20, las = 1)
      plot(validata[[target]], calimean, ylab = ylab, xlab = xlab, xlim = axis.lim, 
           ylim = axis.lim, col = as.factor(validata$flag_0 == 2 | validata$flag_0 == 3 ), 
           cex = 1, main = cali.title)
      errbar(x= validata[[target]], y = calimean, yplus = calimean + calisd, 
             yminus = calimean - calisd, add = T, pch = 20,
             col = as.factor(validata$flag_0 == 2 | validata$flag_0 == 3 ))
      mtext(paste('Direct comparison NSE: ', round(rsquaredcalc(validata[[target]], 
            calimean), 2)))
      abline(0, 1)
      if (missing) {
        legend(axis.lim[1], axis.lim[2], c("Valide", "Missing"), 
               col = c("black", "grey"), pch = 20)
        }
    dev.off()
    
    # Create Monte Carlo simulation plots for all parameters and outcome.
    if (montecarlo) {
      paralist <- strsplit(parameters,',')
      paralist <- c(unlist(paralist), target)
      for (element in paralist) {
        # Sort data frame by parameter.
        validata_sort <- validata[order(validata[[element]]), ]
        validata_sort = na.omit(validata_sort)
        plot_mc_name <- paste('MC_result_', element, '_', sep = '')
        mc.title <- "Monte Carlo Simulation Plot"
        para.lim <- range(validata_sort[[element]])
        if (element == target)
          paralab <- xlab
        else
          paralab <- element
        # Create parameter plot.
          pdf(file = paste(figure.dir, '/', plot_mc_name, nrow(validata_sort), '_', 
                     Sys.Date(), '.pdf', sep = ''))
            par(mar = c(5, 5, 4, 2), bty = 'o', tck = 0.01, cex.axis = 1.25, 
                cex.lab = 1.25, cex.main = 1.25, cex = 1.25, lwd = 1.25, pch = 20, 
                las = 1)
            plot(validata_sort[[element]], validata_sort$q50, type = 'n', 
                 ylim = axis.lim, ylab = ylab, xlim = para.lim, xlab = paralab, 
                 pch = 20, cex = 0.5, main = mc.title)
            polygon(x = c(validata_sort[[element]], rev(validata_sort[[element]])),
                    y = c(validata_sort$q95, rev(validata_sort$q5)), col = 'grey', 
                    border = "white")
            lines(validata_sort[[element]], validata_sort$q50, lwd = 2)
            #with(validata_sort, errbar(x = validata_sort[[element]], y = q50, yplus = q75, 
            #     yminus = q25, add = T, type = 'n', lwd = 1))
          dev.off()
        }
      }
    }
  if (as.numeric(montesim) == 1) {
    EvalFuzzyValidation(file.name = "inputfilename", target = "targetname", 
                        parameters = "parainput")
    EvalFuzzyValidation(file.name = "inputfilename", target = "targetname", 
                        parameters = "parainput", missing = F, outlier = F)
    } else {
    EvalFuzzyValidation(file.name = "inputfilename", target = "targetname", 
                        parameters = "parainput", montecarlo = F)
    EvalFuzzyValidation(file.name = "inputfilename", target = "targetname", 
                        parameters = "parainput", missing = F, outlier = F, 
                        montecarlo = F)
    }
  """
  # Substitute the literal placeholders inside the R template with real values.
  # ("montesim" appears unquoted in the R code and becomes the literal 0/1.)
  RScript = RScript.replace("inputfilename", str(input + '_validation_result.csv')) 
  RScript = RScript.replace("targetname", str(target))
  RScript = RScript.replace("parainput", str(parainput))
  if montecarloflag:
    RScript = RScript.replace("montesim", "1")
  else:
    RScript = RScript.replace("montesim", "0")
  # Pipe the finished script into a fresh R session.
  inputlist = ["R", "--vanilla"]
  proc = subprocess.Popen(args=inputlist, stdin=subprocess.PIPE)
  proc.stdin.write(RScript)
  proc.communicate()

def main():
    """Drive the cross-calibration/validation workflow.

    Reads GRASS options/flags (module globals set by grass.parser()), optionally
    splits and weights the input data, runs crosscalibration() per split, writes
    '<input>_validation_result.csv' and triggers the plotting helpers.

    NOTE(review): the body below mixes tab and space indentation (valid only
    under Python 2's tab==8 rule) -- do not re-indent casually.
    """
    input = options['input']
    output = options['output']
    target = options['target']
    weightfactor = options['weightfactor']
    splitfactor = options['splitfactor']
    samplingfactor = options['samplingfactor']
    parameters = options['parameters']
    uncertainties = options['uncertainties']
    ensembles = options['ensembles']
    iterations = options['iterations']
    fuzzysets = options['fuzzysets']
    allparameterflag = flags['a']
    bootflag = flags['b']
    splitfactorflag = flags['e']
    categoricalflag = flags['c']
    weightingflag = flags['w']
    montecarloflag = flags['m']
    inputrule = options['rulelimit']

    # Monte Carlo mode requires one uncertainty per parameter.
    if montecarloflag:
      if len(parameters.split(',')) != len(uncertainties.split(',')) \
      and len(uncertainties) != 0:
        print("ERROR: Parameter and Uncertainty length differs")
        sys.exit()    
        
    # Either split the map per factor level, or use the whole map as its own
    # "split" (training and excluded set are then identical).
    if splitfactorflag:
      splitvectorinputlist, splitvectorexcludedlist = splitvectordata(input, splitfactor)
    else:
      splitvectorinputlist = list()
      splitvectorinputlist.append(input)
      splitvectorexcludedlist = list()
      splitvectorexcludedlist.append(input)
    if weightingflag:
      
      if categoricalflag:
	weightfactorcolumnname, weightiternum = transformfactorweighting(input, weightfactor)
      else:
	weightfactorcolumnname = weightfactor
	weightfactorlist = grass.read_command('v.db.select', map = input, columns = weightfactor, flags = 'c')
	weightfactorlist = weightfactorlist.rstrip('\n').split('\n')
	weightfactorkeylist = dict.fromkeys(weightfactorlist).keys()
	weightiternum = len(weightfactorkeylist)

    #splitvectorinputlist = ','.join(splitvectorinputlist)
    #splitvectorexcludedlist = ','.join(splitvectorexcludedlist)
        
    #create empty dictionaries with list items and vector input category keys
    inputtable = grass.read_command('v.db.select', map = input, columns = 'cat', flags = 'c', sep= ';')
    inputtable = re.split('\n',inputtable.strip('\n'))
    emptyinputdict = {}
    # NOTE: fromkeys(..., []) shares ONE empty list among all keys; this is safe
    # here because values are only ever rebound via 'x = x + [...]' below,
    # never mutated in place.
    calibrationresultlistcomplete = emptyinputdict.fromkeys(inputtable, [])
    validationresultlistcomplete = emptyinputdict.fromkeys(inputtable, [])
    validationresultsdcomplete = emptyinputdict.fromkeys(inputtable, [])
    resultvalidationdictflagscomplete = emptyinputdict.fromkeys(inputtable, [])
    if montecarloflag:
      resultvalidationdictmincomplete = emptyinputdict.fromkeys(inputtable, [])
      resultvalidationdictq05complete = emptyinputdict.fromkeys(inputtable, [])
      resultvalidationdictq25complete = emptyinputdict.fromkeys(inputtable, [])
      resultvalidationdictq50complete = emptyinputdict.fromkeys(inputtable, [])
      resultvalidationdictq75complete = emptyinputdict.fromkeys(inputtable, [])
      resultvalidationdictq95complete = emptyinputdict.fromkeys(inputtable, [])
      resultvalidationdictmaxcomplete = emptyinputdict.fromkeys(inputtable, []) 
    calibrationresultmeanlistcomplete = emptyinputdict.fromkeys(inputtable, [])
    parameterdictcomplete = emptyinputdict.fromkeys(inputtable, [])
    
    # Map each category ('cat') to its target value.
    targettable = grass.read_command('v.db.select', map = input, columns = str('cat,' + target), flags = 'c', sep= ';')
    targettable = re.split('\n|;',targettable.strip('\n'))
    targetdict = dict(zip(targettable[::2],targettable[1::2]))
    
    # Seed per-category parameter rows: target (and split factor, if used).
    if splitfactorflag:
      splitfactortable = grass.read_command('v.db.select', map = input, columns = str('cat,' + splitfactor), flags = 'c', sep= ';')
      splitfactortable = re.split('\n|;',splitfactortable.strip('\n'))
      splitfactordict = dict(zip(splitfactortable[::2],splitfactortable[1::2]))

      for l in targetdict:
	parameterdictcomplete[l] = parameterdictcomplete[l] + [targetdict[l]] + [splitfactordict[l]]    
    else:
      for l in targetdict:
	parameterdictcomplete[l] = parameterdictcomplete[l] + [targetdict[l]]
    
    # Append every driving parameter value, per category, in list order.
    parameternamelist = parameters.split(',')
    colnum = len(parameternamelist)
    for parameter in parameternamelist:
      parametertable = grass.read_command('v.db.select', map = input, columns = str('cat,' + parameter), flags = 'c', sep= ';')
      parametertable = re.split('\n|;',parametertable.strip('\n'))
      parameterdict = dict(zip(parametertable[::2],parametertable[1::2]))
    
      for l in parameterdict:
	parameterdictcomplete[l] = parameterdictcomplete[l] + [parameterdict[l]]
    calibrationnamecomplete = []
    
    # Cross-calibrate once per split; accumulate results per category.
    for inputvector in splitvectorinputlist:
      
      if splitfactorflag:
	excludedvector = str(inputvector + '_extract')
      else:
	excludedvector = inputvector
	
      calibrationnamecomplete.append(inputvector)
      
      # Four call variants: with/without weighting column, with/without
      # Monte Carlo (which adds min/q05..q95/max result dictionaries).
      if weightfactor == 'None' and montecarloflag:
	calibrationresultdict, validationresultdict, calibrationresultmeandict, validationresultmeandict, \
	validationresultsddict, resultvalidationdictflags, resultvalidationdictmin, \
	resultvalidationdictq05, resultvalidationdictq25, resultvalidationdictq50, \
	resultvalidationdictq75, resultvalidationdictq95, resultvalidationdictmax \
	= crosscalibration(input, inputvector, excludedvector, parameters, uncertainties, \
	target, ensembles, iterations, fuzzysets, mcsim = True, boot = bootflag, \
	samplingfactor = samplingfactor, rulelimit = inputrule)
      elif weightfactor == 'None' and montecarloflag == False:
	calibrationresultdict, validationresultdict, calibrationresultmeandict, validationresultmeandict, \
	validationresultsddict, resultvalidationdictflags, \
	= crosscalibration(input, inputvector, excludedvector, parameters, uncertainties, \
	target, ensembles, iterations, fuzzysets, mcsim = False, boot = bootflag, \
	samplingfactor = samplingfactor, rulelimit = inputrule)
      elif weightfactor != 'None' and montecarloflag:
	calibrationresultdict, validationresultdict, calibrationresultmeandict, validationresultmeandict, \
	validationresultsddict, resultvalidationdictflags, resultvalidationdictmin, \
	resultvalidationdictq05, resultvalidationdictq25, resultvalidationdictq50, \
	resultvalidationdictq75, resultvalidationdictq95, resultvalidationdictmax \
	= crosscalibration(input, inputvector, excludedvector, parameters, uncertainties, \
	target, ensembles, iterations, fuzzysets, weightfactorcolumnname, weightiternum, \
	mcsim = True, boot = bootflag, samplingfactor = samplingfactor, rulelimit = inputrule)
      else:
	calibrationresultdict, validationresultdict, calibrationresultmeandict, validationresultmeandict, \
	validationresultsddict, resultvalidationdictflags, \
	= crosscalibration(input, inputvector, excludedvector, parameters, uncertainties, \
	target, ensembles, iterations, fuzzysets, weightfactorcolumnname, weightiternum, \
	mcsim = False, boot = bootflag, samplingfactor = samplingfactor, rulelimit = inputrule)
      
      # Create Fuzzyset plots.
      xmlparse(inputvector, ensembles)
        
      for l in calibrationresultmeandict:
	calibrationresultmeanlistcomplete[l] = calibrationresultmeanlistcomplete[l] + [calibrationresultmeandict[l]]
      
      for l in validationresultmeandict:
	validationresultlistcomplete[l] = validationresultlistcomplete[l] + [validationresultmeandict[l]]

      for l in validationresultsddict:
	validationresultsdcomplete[l] = validationresultsdcomplete[l] + [validationresultsddict[l]]
	
      for l in resultvalidationdictflags:
	resultvalidationdictflagscomplete[l] = resultvalidationdictflagscomplete[l] + resultvalidationdictflags[l]

      if montecarloflag:
	
        for l in resultvalidationdictmin:
	  resultvalidationdictmincomplete[l] = resultvalidationdictmincomplete[l] + resultvalidationdictmin[l]

        for l in resultvalidationdictq05:
	  resultvalidationdictq05complete[l] = resultvalidationdictq05complete[l] + resultvalidationdictq05[l]

        for l in resultvalidationdictq25:
	  resultvalidationdictq25complete[l] = resultvalidationdictq25complete[l] + resultvalidationdictq25[l]

        for l in resultvalidationdictq50:
	  resultvalidationdictq50complete[l] = resultvalidationdictq50complete[l] + resultvalidationdictq50[l]

        for l in resultvalidationdictq75:
	  resultvalidationdictq75complete[l] = resultvalidationdictq75complete[l] + resultvalidationdictq75[l]

        for l in resultvalidationdictq95:
	  resultvalidationdictq95complete[l] = resultvalidationdictq95complete[l] + resultvalidationdictq95[l]

        for l in resultvalidationdictmax:
	  resultvalidationdictmaxcomplete[l] = resultvalidationdictmaxcomplete[l] + resultvalidationdictmax[l]
    # Flatten everything into one row per category, in the same column order
    # as the CSV header written further below.
    # NOTE(review): 'numpy.isnan(i)' with i being a one-element list relies on
    # numpy broadcasting plus single-element truthiness -- confirm the dict
    # values really are length-1 lists.
    if montecarloflag:
      
      for l in parameterdictcomplete:
        validationresultlistcomplete[l] = [i[0] for i in validationresultlistcomplete[l] if not numpy.isnan(i)] + \
                                          [i[0] for i in validationresultsdcomplete[l] if not i == [0]] + \
                                          resultvalidationdictmincomplete[l] + \
                                          resultvalidationdictq05complete[l]+ \
                                          resultvalidationdictq25complete[l] + \
                                          resultvalidationdictq50complete[l] + \
                                          resultvalidationdictq75complete[l] + \
                                          resultvalidationdictq95complete[l] + \
                                          resultvalidationdictmaxcomplete[l] + \
                                          parameterdictcomplete[l] + \
                                          resultvalidationdictflagscomplete[l] + \
                                          [i[0] for i in calibrationresultmeanlistcomplete[l]]
    else:
      
      for l in parameterdictcomplete:
        validationresultlistcomplete[l] = [i[0] for i in validationresultlistcomplete[l] if not numpy.isnan(i)] + \
                                          [i[0] for i in validationresultsdcomplete[l] if not i == [0]] + \
                                          parameterdictcomplete[l] + \
                                          resultvalidationdictflagscomplete[l] + \
                                          [i[0] for i in calibrationresultmeanlistcomplete[l]]

    # Build header column names: one flag column per ensemble, plus the
    # Monte Carlo quantile columns if requested.
    flag_list = []
    for ensemblenum in range(int(ensembles)):
      flag_list.append(str('flag_' + str(ensemblenum)))
    if montecarloflag:
      quantile_list = []  
      for i in (5,25,50,75,95):
        quantile_list.append(str('q'+str(i)))
      
    # Write the result CSV consumed by ValidationPlot().
    o = open(str(input + '_validation_result.csv'), "w+")
    w = csv.writer(o)
    
    if splitfactorflag and montecarloflag:
      w.writerow(['cat'] + \
                 ['validation_mean'] + \
                 ['validation_sd'] + \
                 ['validation_min'] + \
                 quantile_list + \
                 ['validation_max'] + \
                 [target] + \
                 [splitfactor] + \
                 parameternamelist + \
                 flag_list + \
                 calibrationnamecomplete)
    elif splitfactorflag and montecarloflag == False:
      w.writerow(['cat'] + \
                 ['validation_mean'] + \
                 ['validation_sd'] + \
                 [target] + \
                 [splitfactor] + \
                 parameternamelist + \
                 flag_list + \
                 calibrationnamecomplete)                 
    else:
      w.writerow(['cat'] + \
                 ['validation_mean'] + \
                 ['validation_sd'] + \
                 [target] + \
                 parameternamelist + \
                 flag_list + \
                 calibrationnamecomplete)
    
    for key, val in validationresultlistcomplete.items():
      w.writerow([key] + val)  
    o.close()
    
    # Create Validation and Calibration Plots.
    ValidationPlot(input, target, montecarloflag, parameters)

if __name__ == "__main__":
    # grass.parser() populates the module-level 'options' and 'flags'
    # dictionaries that main() and the helper functions read.
    options, flags = grass.parser()
    main()
