// Simple registry of Hadoop configuration properties.  Each entry keeps the
// shipped default (`value`), a human-readable `description`, and an optional
// user override (`user`) recorded via setValue().
var HadoopConf = {
	// Register a property with its default value and description.
	// Overwrites any existing entry (including a previous user override).
	addConf: function (name, value, description) {
		if (!this.confs) this.confs = {};
		this.confs[name] = {description: description, value: value};
	},
	
	// Effective value for a property: the user override when one has been
	// set, otherwise the shipped default.  Returns null for unknown names.
	getValue: function (name) {
		if (!this.confs) return null;
		var conf = this.confs[name];
		if (!conf) return null;
		// Test for the *presence* of the override rather than its
		// truthiness, so falsy overrides (0, false, "") are honoured.
		return ('user' in conf) ? conf.user : conf.value;
	},
	
	// Record a user override.  Unknown names get a default-less entry, and
	// the registry is created on demand so early overrides are not lost.
	setValue: function (name, value) {
		if (!this.confs) this.confs = {};
		if (this.confs[name]) {
			this.confs[name].user = value;
		} else {
			this.confs[name] = {user: value};
		}
	},
	
	// Shipped default for a property, or null if unknown.
	getDefault: function (name) {
		if (!this.confs || !this.confs[name]) return null;
		return this.confs[name].value;
	},
	
	// Description text for a property, or null if unknown.
	getDescription: function (name) {
		if (!this.confs || !this.confs[name]) return null;
		return this.confs[name].description;
	},
	
	// Emit one line per property through `writer`; overridden values are
	// flagged inline.  With `full` set, the description is emitted too.
	report: function (writer, full) {
		for (var name in this.confs) {	// `var` avoids leaking a global
			var conf = this.confs[name];
			writer(name + ": " + conf.value +
					(('user' in conf) ? " (overridden to " + conf.user + ")" : ""));
			if (full) writer(conf.description);
		}
	}
};

// Hand-maintained entries (not generated from an XML defaults file).
// Descriptions reworded for grammar.
HadoopConf.addConf("dfs.namenode.port", 50000,
		"Port number at which this namenode is listening");
HadoopConf.addConf("dfs.namenode.ipc.port", 50000,
		"IPC port number at which this namenode is listening");

// GENERATED FROM FILE: core-default.xml

// NOTE(review): generated block -- prefer regenerating from core-default.xml
// over editing these entries by hand.
HadoopConf.addConf("io.file.buffer.size", 4096, 
'The size of buffer for use in sequence files. \
  The size of this buffer should probably be a multiple of hardware \
  page size (4096 on Intel x86), and it determines how much data is \
  buffered during read and write operations.');
HadoopConf.addConf("io.bytes.per.checksum", 512, 
'The number of bytes per checksum.  Must not be larger than \
  io.file.buffer.size.');
HadoopConf.addConf("io.skip.checksum.errors", false, 
'If true, when a checksum error is encountered while \
  reading a sequence file, entries are skipped, instead of throwing an \
  exception.');
HadoopConf.addConf("fs.default.name", "file:///", 
'The name of the default file system.  A URI whose \
  scheme and authority determine the FileSystem implementation.  The \
  uri\'s scheme determines the config property (fs.SCHEME.impl) naming \
  the FileSystem implementation class.  The uri\'s authority is used to \
  determine the host, port, etc. for a filesystem.');
HadoopConf.addConf("fs.trash.interval", 0, 
'Number of minutes between trash checkpoints. \
  If zero, the trash feature is disabled.');
HadoopConf.addConf("local.cache.size", 10737418240, 
'The limit on the size of cache you want to keep, set by default \
  to 10GB. This will act as a soft limit on the cache directory for out of band data.');
HadoopConf.addConf("ipc.client.idlethreshold", 4000, 
'Defines the threshold number of connections after which \
               connections will be inspected for idleness.');
HadoopConf.addConf("ipc.client.kill.max", 10, 
'Defines the maximum number of clients to disconnect in one go.');
HadoopConf.addConf("ipc.client.connection.maxidletime", 10000, 
'The maximum time in msec after which a client will bring down the \
               connection to the server.');
HadoopConf.addConf("ipc.client.connect.max.retries", 10, 
'Indicates the number of retries a client will make to establish \
               a server connection.');
HadoopConf.addConf("ipc.server.listen.queue.size", 128, 
'Indicates the length of the listen queue for servers accepting \
               client connections.');
HadoopConf.addConf("ipc.server.tcpnodelay", false, 
'Turn on/off Nagle\'s algorithm for the TCP socket connection on  \
  the server. Setting to true disables the algorithm and may decrease latency \
  with a cost of more/smaller packets.');
HadoopConf.addConf("ipc.client.tcpnodelay", false, 
'Turn on/off Nagle\'s algorithm for the TCP socket connection on  \
  the client. Setting to true disables the algorithm and may decrease latency \
  with a cost of more/smaller packets.');
HadoopConf.addConf("topology.script.file.name", null, 
'The script name that should be invoked to resolve DNS names to \
    NetworkTopology names. Example: the script would take host.foo.bar as an \
    argument, and return /rack1 as the output.');
HadoopConf.addConf("topology.script.number.args", 100, 
'The max number of args that the script configured with  \
    topology.script.file.name should be run with. Each arg is an \
    IP address.');

// GENERATED FROM FILE: hdfs-default.xml

// NOTE(review): generated block -- prefer regenerating from hdfs-default.xml
// over editing these entries by hand.
HadoopConf.addConf("dfs.namenode.logging.level", "info", 
		'The logging level for dfs namenode. Other values are \"dir\"(trac \
		e namespace mutations), \"block\"(trace block under/over replications and block \
		creations/deletions), or \"all\".');
HadoopConf.addConf("dfs.datanode.address", 50010, 
		'The address where the datanode server will listen to. \
		If the port is 0 then the server will start on a free port.');
HadoopConf.addConf("dfs.datanode.http.address", 50075, 
		'The datanode http server address and port. \
		If the port is 0 then the server will start on a free port.');
HadoopConf.addConf("dfs.datanode.ipc.address", 50020, 
		'The datanode ipc server address and port. \
		If the port is 0 then the server will start on a free port.');
HadoopConf.addConf("dfs.datanode.handler.count", 3, 
		'The number of server threads for the datanode.');
HadoopConf.addConf("dfs.http.address", "0.0.0.0:50070", 
		'The address and the base port where the dfs namenode web ui will listen on. \
		If the port is 0 then the server will start on a free port.');
HadoopConf.addConf("dfs.datanode.https.address", "0.0.0.0:50475", 'No description');
HadoopConf.addConf("dfs.https.address", "0.0.0.0:50470", 'No description');
HadoopConf.addConf("dfs.replication.considerLoad", true, 
		'Decide if chooseTarget considers the target\'s load or not');
HadoopConf.addConf("dfs.default.chunk.view.size", 32768, 
		'The number of bytes to view for a file on the browser.');
HadoopConf.addConf("dfs.datanode.du.reserved", 0, 
		'Reserved space in bytes per volume. Always leave this much space free for non dfs use.');
HadoopConf.addConf("dfs.replication", 3, 
		'Default block replication.  \
		The actual number of replications can be specified when the file is created. \
		The default is used if replication is not specified in create time.');
HadoopConf.addConf("dfs.replication.max", 512, 'Maximal block replication.');
HadoopConf.addConf("dfs.replication.min", 1, 'Minimal block replication.');
HadoopConf.addConf("dfs.block.size", 67108864, 'The default block size for new files.');
HadoopConf.addConf("dfs.df.interval", 60000, 'Disk usage statistics refresh interval in msec.');
HadoopConf.addConf("dfs.client.block.write.retries", 3, 
		'The number of retries for writing blocks to the data nodes,  \
		before we signal failure to the application.');
HadoopConf.addConf("dfs.blockreport.intervalMsec", 3600000, 
		'Determines block reporting interval in milliseconds.');
HadoopConf.addConf("dfs.blockreport.initialDelay", 0, 
		'Delay for first block report in seconds.');
HadoopConf.addConf("dfs.heartbeat.interval", 3, 
		'Determines datanode heartbeat interval in seconds.');
HadoopConf.addConf("dfs.namenode.handler.count", 10, 
		'The number of server threads for the namenode.');
HadoopConf.addConf("dfs.safemode.threshold.pct", 0.999, 
		'Specifies the percentage of blocks that should satisfy  \
		the minimal replication requirement defined by dfs.replication.min. \
		Values less than or equal to 0 mean not to start in safe mode. \
	Values greater than 1 will make safe mode permanent.');
HadoopConf.addConf("dfs.safemode.extension", 30000, 
		'Determines extension of safe mode in milliseconds after the threshold level is reached.');
HadoopConf.addConf("dfs.balance.bandwidthPerSec", 1048576, 
		'Specifies the maximum amount of bandwidth that each datanode \
		can utilize for the balancing purpose in term of \
		the number of bytes per second.');
HadoopConf.addConf("dfs.hosts", null, 
		'Names a file that contains a list of hosts that are \
		permitted to connect to the namenode. The full pathname of the file \
		must be specified.  If the value is empty, all hosts are permitted.');
HadoopConf.addConf("dfs.hosts.exclude", null, 
		'Names a file that contains a list of hosts that are \
		not permitted to connect to the namenode.  The full pathname of the \
		file must be specified.  If the value is empty, no hosts are excluded.');
HadoopConf.addConf("dfs.max.objects", 0, 
		'The maximum number of files, directories and blocks \
		dfs supports. A value of zero indicates no limit to the number \
		of objects that dfs supports.');
HadoopConf.addConf("dfs.namenode.decommission.interval", 30, 
		'Namenode periodicity in seconds to check if decommission is complete.');
HadoopConf.addConf("dfs.namenode.decommission.nodes.per.interval", 5, 
		'The number of nodes namenode checks if decommission is complete \
		in each dfs.namenode.decommission.interval.');
HadoopConf.addConf("dfs.replication.interval", 3, 
		'The periodicity in seconds with which the namenode computes  \
		repliaction work for datanodes.');
HadoopConf.addConf("dfs.access.time.precision", 3600000, 
		'The access time for HDFS file is precise upto this value.  \
		The default value is 1 hour. Setting a value of 0 disables \
		access times for HDFS.');
HadoopConf.addConf("dfs.support.append", false, 
		'Does HDFS allow appends to files? \
		This is currently set to false because there are bugs in the \
		\"append code\" and is not supported in any prodction cluster.');
HadoopConf.addConf("dfs.datanode.failed.volumes.tolerated", 0, 
		'The number of volumes that are allowed to \
		fail before a datanode stops offering service. By default \
		any volume failure will cause a datanode to shutdown.');

// GENERATED FROM FILE: mapred-default.xml

// NOTE(review): generated block -- prefer regenerating from mapred-default.xml
// over editing these entries by hand.
HadoopConf.addConf("io.sort.factor", "10", 
		'The number of streams to merge at once while sorting \
		files.  This determines the number of open file handles.');
HadoopConf.addConf("io.sort.mb", "100", 
		'The total amount of buffer memory to use while sorting  \
		files, in megabytes.  By default, gives each merge stream 1MB, which \
		should minimize seeks.');
HadoopConf.addConf("io.sort.record.percent", "0.05", 
		'The percentage of io.sort.mb dedicated to tracking record \
		boundaries. Let this value be r, io.sort.mb be x. The maximum number \
		of records collected before the collection thread must block is equal \
		to (r * x) / 4');
HadoopConf.addConf("io.sort.spill.percent", "0.80", 
		'The soft limit in either the buffer or record collection \
		buffers. Once reached, a thread will begin to spill the contents to disk \
		in the background. Note that this does not imply any chunking of data to \
		the spill. A value less than 0.5 is not recommended.');
HadoopConf.addConf("io.map.index.skip", "0", 
		'Number of index entries to skip between each entry. \
		Zero by default. Setting this to values larger than zero can \
		facilitate opening large map files using less memory.');
HadoopConf.addConf("mapred.job.tracker", "local", 
		'The host and port that the MapReduce job tracker runs \
		at.  If \"local\", then jobs are run in-process as a single map \
		and reduce task.');
HadoopConf.addConf("mapred.job.tracker.http.address", "0.0.0.0:50030", 
		'The job tracker http server address and port the server will listen on. \
		If the port is 0 then the server will start on a free port.');
HadoopConf.addConf("mapred.job.tracker.handler.count", "10", 
		'The number of server threads for the JobTracker. This should be roughly \
		4% of the number of tasktracker nodes.');
HadoopConf.addConf("mapred.task.tracker.report.address", "127.0.0.1:0", 
		'The interface and port that task tracker server listens on.  \
		Since it is only connected to by the tasks, it uses the local interface. \
		EXPERT ONLY. Should only be changed if your host does not have the loopback  \
		interface.');
HadoopConf.addConf("mapred.local.dir.minspacestart", "0", 
		'If the space in mapred.local.dir drops under this,  \
		do not ask for more tasks. Value in bytes.');
HadoopConf.addConf("mapred.local.dir.minspacekill", "0", 
		'If the space in mapred.local.dir drops under this,  \
		do not ask more tasks until all the current ones have finished and  \
		cleaned up. Also, to save the rest of the tasks we have running,  \
		kill one of them, to clean up some space. Start with the reduce tasks, \
		then go with the ones that have finished the least. \
Value in bytes.');
HadoopConf.addConf("mapred.tasktracker.expiry.interval", "600000", 
		'Expert: The time-interval, in miliseconds, after which \
		a tasktracker is declared \'lost\' if it doesn\'t send heartbeats.');
HadoopConf.addConf("mapred.tasktracker.taskmemorymanager.monitoring-interval", "5000", 
		'The interval, in milliseconds, for which the tasktracker waits \
		between two cycles of monitoring its tasks\' memory usage. Used only if \
		tasks\' memory management is enabled via mapred.tasktracker.tasks.maxmemory.');
HadoopConf.addConf("mapred.tasktracker.tasks.sleeptime-before-sigkill", "5000", 
		'The time, in milliseconds, the tasktracker waits for sending a \
  		SIGKILL to a process, after it has been sent a SIGTERM.');
HadoopConf.addConf("mapred.map.tasks", "2", 
		'The default number of map tasks per job. \
  		Ignored when mapred.job.tracker is \"local\".');
HadoopConf.addConf("mapred.reduce.tasks", "1", 
		'The default number of reduce tasks per job. Typically set to 99% \
		of the cluster\'s reduce capacity, so that if a node fails the reduces can  \
		still be executed in a single wave. \
		Ignored when mapred.job.tracker is \"local\".');
HadoopConf.addConf("mapreduce.tasktracker.outofband.heartbeat", false, 
		'Expert: Set this to true to let the tasktracker send an  \
		out-of-band heartbeat on task-completion for better latency.');
HadoopConf.addConf("mapred.jobtracker.restart.recover", false, 
		'\"true\" to enable (job) recovery upon restart, \
		\"false\" to start afresh');
HadoopConf.addConf("mapred.jobtracker.job.history.block.size", "3145728", 
		'The block size of the job history file. Since the job recovery \
		uses job history, its important to dump job history to disk as  \
		soon as possible. Note that this is an expert level parameter. \
		The default value is set to 3 MB.');
HadoopConf.addConf("mapreduce.job.split.metainfo.maxsize", "10000000", 
		'The maximum permissible size of the split metainfo file. \
		The JobTracker won\'t attempt to read split metainfo files bigger than \
		the configured value. No limits if set to -1.');
HadoopConf.addConf("mapred.jobtracker.taskScheduler.maxRunningTasksPerJob", null, 
		'The maximum number of running tasks for a job before \
		it gets preempted. No limits if undefined.');
HadoopConf.addConf("mapred.map.max.attempts", 4, 
		'Expert: The maximum number of attempts per map task. \
		In other words, framework will try to execute a map task these many number \
		of times before giving up on it.');
HadoopConf.addConf("mapred.reduce.max.attempts", 4, 
		'Expert: The maximum number of attempts per reduce task. \
		In other words, framework will try to execute a reduce task these many number \
		of times before giving up on it.');
HadoopConf.addConf("mapred.reduce.parallel.copies", 5, 
		'The default number of parallel transfers run by reduce \
during the copy(shuffle) phase.');
HadoopConf.addConf("mapred.reduce.copy.backoff", 300, 
		'The maximum amount of time (in seconds) a reducer spends on  \
fetching one map output before declaring it as failed.');
HadoopConf.addConf("mapreduce.reduce.shuffle.connect.timeout", 180000, 
		'Expert: The maximum amount of time (in milli seconds) a reduce \
		task spends in trying to connect to a tasktracker for getting map output.');
HadoopConf.addConf("mapreduce.reduce.shuffle.read.timeout", 180000, 
		'Expert: The maximum amount of time (in milli seconds) a reduce \
		task waits for map output data to be available for reading after obtaining connection.');
HadoopConf.addConf("mapred.task.timeout", 600000, 
		'The number of milliseconds before a task will be \
		terminated if it neither reads an input, writes an output, nor \
		updates its status string.');
HadoopConf.addConf("mapred.tasktracker.map.tasks.maximum", 2, 
		'The maximum number of map tasks that will be run \
		simultaneously by a task tracker.');
HadoopConf.addConf("mapred.tasktracker.reduce.tasks.maximum", 2, 
		'The maximum number of reduce tasks that will be run \
		simultaneously by a task tracker.');
HadoopConf.addConf("mapred.jobtracker.completeuserjobs.maximum", 100, 
		'The maximum number of complete jobs per user to keep around  \
		before delegating them to the job history.');
HadoopConf.addConf("mapreduce.reduce.input.limit", -1, 
		'The limit on the input size of the reduce. If the estimated \
		input size of the reduce is greater than this value, job is failed. A \
		value of -1 means that there is no limit set.');
HadoopConf.addConf("mapred.job.tracker.retiredjobs.cache.size", 1000, 
		'The number of retired job status to keep in the cache.');
HadoopConf.addConf("mapred.job.tracker.jobhistory.lru.cache.size", 5, 
		'The number of job history files loaded in memory. The jobs are  \
		loaded when they are first accessed. The cache is cleared based on LRU.');
HadoopConf.addConf("mapred.child.ulimit", null, 
		'The maximum virtual memory, in KB, of a process launched by the  \
		Map-Reduce framework. This can be used to control both the Mapper/Reducer  \
		tasks and applications using Hadoop Pipes, Hadoop Streaming etc.  \
		By default it is left unspecified to let cluster admins control it via  \
		limits.conf and other such relevant mechanisms. \
		\
		Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to \
		JavaVM, else the VM might not start.');
HadoopConf.addConf("mapred.cluster.map.memory.mb", -1, 
		'The size, in terms of virtual memory, of a single map slot  \
		in the Map-Reduce framework, used by the scheduler.  \
		A job can ask for multiple slots for a single map task via  \
		mapred.job.map.memory.mb, upto the limit specified by  \
		mapred.cluster.max.map.memory.mb, if the scheduler supports the feature.  \
		The value of -1 indicates that this feature is turned off.');
HadoopConf.addConf("mapred.cluster.reduce.memory.mb", -1, 
		'The size, in terms of virtual memory, of a single reduce slot  \
		in the Map-Reduce framework, used by the scheduler.  \
		A job can ask for multiple slots for a single reduce task via  \
		mapred.job.reduce.memory.mb, upto the limit specified by  \
		mapred.cluster.max.reduce.memory.mb, if the scheduler supports the feature.  \
		The value of -1 indicates that this feature is turned off.');
HadoopConf.addConf("mapred.cluster.max.map.memory.mb", -1, 
		'The maximum size, in terms of virtual memory, of a single map  \
		task launched by the Map-Reduce framework, used by the scheduler.  \
		A job can ask for multiple slots for a single map task via  \
		mapred.job.map.memory.mb, upto the limit specified by  \
		mapred.cluster.max.map.memory.mb, if the scheduler supports the feature.  \
		The value of -1 indicates that this feature is turned off.');
HadoopConf.addConf("mapred.cluster.max.reduce.memory.mb", -1, 
		'The maximum size, in terms of virtual memory, of a single reduce  \
		task launched by the Map-Reduce framework, used by the scheduler.  \
		A job can ask for multiple slots for a single reduce task via  \
		mapred.job.reduce.memory.mb, upto the limit specified by  \
		mapred.cluster.max.reduce.memory.mb, if the scheduler supports the feature.  \
		The value of -1 indicates that this feature is turned off.');
HadoopConf.addConf("mapred.job.map.memory.mb", -1, 
		'The size, in terms of virtual memory, of a single map task  \
		for the job. \
		A job can ask for multiple slots for a single map task, rounded up to the  \
		next multiple of mapred.cluster.map.memory.mb and upto the limit  \
		specified by mapred.cluster.max.map.memory.mb, if the scheduler supports  \
		the feature.  \
		The value of -1 indicates that this feature is turned off iff  \
		mapred.cluster.map.memory.mb is also turned off (-1).');
HadoopConf.addConf("mapred.job.reduce.memory.mb", -1, 
		'The size, in terms of virtual memory, of a single reduce task  \
		for the job. \
		A job can ask for multiple slots for a single map task, rounded up to the  \
		next multiple of mapred.cluster.reduce.memory.mb and upto the limit  \
		specified by mapred.cluster.max.reduce.memory.mb, if the scheduler supports  \
		the feature.  \
		The value of -1 indicates that this feature is turned off iff \
		mapred.cluster.reduce.memory.mb is also turned off (-1).');
HadoopConf.addConf("mapred.inmem.merge.threshold", 1000, 
		'The threshold, in terms of the number of files  \
		for the in-memory merge process. When we accumulate threshold number of files \
		we initiate the in-memory merge and spill to disk. A value of 0 or less than \
		0 indicates we want to DON\'T have any threshold and instead depend only on \
		the ramfs\'s memory consumption to trigger the merge.');
HadoopConf.addConf("mapred.job.shuffle.merge.percent", "0.66", 
		'The usage threshold at which an in-memory merge will be \
		initiated, expressed as a percentage of the total memory allocated to \
		storing in-memory map outputs, as defined by \
		mapred.job.shuffle.input.buffer.percent.');
HadoopConf.addConf("mapred.job.shuffle.input.buffer.percent", 0.70, 
		'The percentage of memory to be allocated from the maximum heap \
		size to storing map outputs during the shuffle.');
HadoopConf.addConf("mapred.job.reduce.input.buffer.percent", 0.0, 
		'The percentage of memory- relative to the maximum heap size- to \
		retain map outputs during the reduce. When the shuffle is concluded, any \
		remaining map outputs in memory must consume less than this threshold before \
		the reduce can begin.');
HadoopConf.addConf("mapred.map.tasks.speculative.execution", true, 
		'If true, then multiple instances of some map tasks may be executed in parallel.');
HadoopConf.addConf("mapred.job.reuse.jvm.num.tasks", 1, 
		'How many tasks to run per jvm. If set to -1, there is no limit.');
HadoopConf.addConf("mapred.min.split.size", 0, 
		'The minimum size chunk that map input should be split \
		into.  Note that some file formats may have minimum split sizes that \
		take priority over this setting.');
HadoopConf.addConf("mapred.jobtracker.maxtasks.per.job", -1, 
		'The maximum number of tasks for a single job. \
		A value of -1 indicates that there is no maximum.');
HadoopConf.addConf("mapred.submit.replication", 10, 
		'The replication level for submitted job files.  This \
		should be around the square root of the number of nodes.');
HadoopConf.addConf("mapred.task.tracker.http.address", "0.0.0.0:50060", 
		'The task tracker http server address and port. \
		If the port is 0 then the server will start on a free port.');
HadoopConf.addConf("keep.failed.task.files", false, 
		'Should the files for failed tasks be kept. This should only be  \
		used on jobs that are failing, because the storage is never \
		reclaimed. It also prevents the map outputs from being erased \
		from the reduce directory as they are consumed.');
HadoopConf.addConf("mapred.output.compress", false, 
		'Should the job outputs be compressed?');
HadoopConf.addConf("mapred.output.compression.type", "RECORD", 
		'If the job outputs are to compressed as SequenceFiles, how should \
		they be compressed? Should be one of NONE, RECORD or BLOCK.');
HadoopConf.addConf("mapred.compress.map.output", false, 
		'Should the outputs of the maps be compressed before being \
		sent across the network. Uses SequenceFile compression.');
HadoopConf.addConf("mapred.userlog.limit.kb", 0, 
	'The maximum size of user-logs of each task in KB. 0 disables the cap.');
HadoopConf.addConf("mapred.userlog.retain.hours", 24, 
		'The maximum time, in hours, for which the user-logs are to be  \
		retained after the job completion.');
HadoopConf.addConf("mapred.user.jobconf.limit", 5242880, 
		'The maximum allowed size of the user jobconf. The  default is set to 5 MB');
HadoopConf.addConf("mapred.hosts", null, 
		'Names a file that contains the list of nodes that may \
		connect to the jobtracker.  If the value is empty, all hosts are permitted.');
HadoopConf.addConf("mapred.hosts.exclude", null, 
		'Names a file that contains the list of hosts that \
		should be excluded by the jobtracker.  If the value is empty, no hosts are excluded.');
HadoopConf.addConf("mapred.heartbeats.in.second", 100, 
		'Expert: Approximate number of heart-beats that could arrive  \
		at JobTracker in a second. Assuming each RPC can be processed  \
		in 10msec, the default value is made 100 RPCs in a second.');
HadoopConf.addConf("mapred.max.tracker.blacklists", 4, 
		'The number of blacklists for a tasktracker by various jobs \
		after which the tasktracker will be marked as potentially \
		faulty and is a candidate for graylisting across all jobs. \
		(Unlike blacklisting, this is advisory; the tracker remains \
		active.  However, it is reported as graylisted in the web UI, \
		with the expectation that chronically graylisted trackers \
		will be manually decommissioned.)  This value is tied to \
		mapred.jobtracker.blacklist.fault-timeout-window; faults \
		older than the window width are forgiven, so the tracker \
		will recover from transient problems.  It will also become \
		healthy after a restart.');
HadoopConf.addConf("mapred.jobtracker.blacklist.fault-timeout-window", 180, 
		'The timeout (in minutes) after which per-job tasktracker \
		faults are forgiven.  The window is logically a circular \
		buffer of time-interval buckets whose width is defined by \
		mapred.jobtracker.blacklist.fault-bucket-width; when the \
		\"now\" pointer moves across a bucket boundary, the previous \
		contents (faults) of the new bucket are cleared.  In other \
		words, the timeout\'s granularity is determined by the bucket width.');
HadoopConf.addConf("mapred.jobtracker.blacklist.fault-bucket-width", 15, 
		'The width (in minutes) of each bucket in the tasktracker \
		fault timeout window.  Each bucket is reused in a circular \
		manner after a full timeout-window interval (defined by \
		mapred.jobtracker.blacklist.fault-timeout-window).');
HadoopConf.addConf("mapred.max.tracker.failures", 4, 
		'The number of task-failures on a tasktracker of a given job  \
		after which new tasks of that job aren\'t assigned to it.');
HadoopConf.addConf("jobclient.output.filter", "FAILED", 
		'The filter for controlling the output of the task\'s userlogs sent \
		to the console of the JobClient.  \
		The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and  ALL.');
HadoopConf.addConf("mapred.job.tracker.persist.jobstatus.active", false, 
		'Indicates if persistency of job status information is active or not.');
HadoopConf.addConf("mapred.job.tracker.persist.jobstatus.hours", 0, 
		'The number of hours job status information is persisted in DFS. \
		The job status information will be available after it drops of the memory \
		queue and between jobtracker restarts. With a zero value the job status \
		information is not persisted at all in DFS.');
HadoopConf.addConf("mapreduce.job.complete.cancel.delegation.tokens", true, 
		'if false - do not unregister/cancel delegation tokens \
		from renewal, because same tokens may be used by spawned jobs');
HadoopConf.addConf("mapred.task.profile", false, 
		'To set whether the system should collect profiler \
		information for some of the tasks in this job? The information is stored \
		in the user log directory. The value is \"true\" if task profiling is enabled.');
HadoopConf.addConf("mapred.task.profile.maps", "0-2", 
		'To set the ranges of map tasks to profile. \
		mapred.task.profile has to be set to true for the value to be accounted.');
HadoopConf.addConf("mapred.task.profile.reduces", "0-2", 
		'To set the ranges of reduce tasks to profile. \
		mapred.task.profile has to be set to true for the value to be accounted.');
HadoopConf.addConf("mapred.line.input.format.linespermap", 1, 
		'Number of lines per split in NLineInputFormat.');
HadoopConf.addConf("mapred.skip.attempts.to.start.skipping", 2, 
		'The number of Task attempts AFTER which skip mode  \
		will be kicked off. When skip mode is kicked off, the  \
		tasks reports the range of records which it will process  \
		next, to the TaskTracker. So that on failures, TT knows which  \
		ones are possibly the bad records. On further executions,  \
		those are skipped.');
HadoopConf.addConf("mapred.skip.map.auto.incr.proc.count", true, 
		'The flag which if set to true,  \
		SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented  \
		by MapRunner after invoking the map function. This value must be set to  \
		false for applications which process the records asynchronously  \
		or buffer the input records. For example streaming.  \
		In such cases applications should increment this counter on their own.');
HadoopConf.addConf("mapred.skip.reduce.auto.incr.proc.count", true, 
		'The flag which if set to true,  \
		SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented  \
		by framework after invoking the reduce function. This value must be set to  \
		false for applications which process the records asynchronously  \
		or buffer the input records. For example streaming.  \
		In such cases applications should increment this counter on their own.');
HadoopConf.addConf("job.end.retry.attempts", 0, 
		'Indicates how many times hadoop should attempt to contact the notification URL');
HadoopConf.addConf("job.end.retry.interval", 30000, 
		'Indicates time in milliseconds between notification URL retry calls');
HadoopConf.addConf("mapred.task.cache.levels", 2, 
		'This is the max level of the task cache. For example, if \
		the level is 2, the tasks cached are at the host level and at the rack level.');
HadoopConf.addConf("mapred.queue.names", "default", 
		'Comma separated list of queues configured for this jobtracker. \
		Jobs are added to queues and schedulers can configure different  \
		scheduling properties for the various queues. To configure a property  \
		for a queue, the name of the queue must match the name specified in this  \
		value. Queue properties that are common to all schedulers are configured  \
		here with the naming convention, mapred.queue.$QUEUE-NAME.$PROPERTY-NAME, \
		for e.g. mapred.queue.default.submit-job-acl. \
		The number of queues configured in this parameter could depend on the \
		type of scheduler being used, as specified in  \
		mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler \
		supports only a single queue, which is the default configured here. \
		Before adding more queues, ensure that the scheduler you\'ve configured \
		supports multiple queues.');
HadoopConf.addConf("mapred.queue.default.state", "RUNNING", 
		'This values defines the state , default queue is in. \
		the values can be either \"STOPPED\" or \"RUNNING\" \
		This value can be changed at runtime.');
HadoopConf.addConf("mapred.job.queue.name", "default", 
		'Queue to which a job is submitted. This must match one of the \
		queues defined in mapred.queue.names for the system. Also, the ACL setup \
		for the queue must allow the current user to submit a job to the queue. \
		Before specifying a queue, ensure that the system is configured with  \
		the queue, and access is allowed for submitting jobs to the queue.');
HadoopConf.addConf("mapred.tasktracker.indexcache.mb", "10", 
		'The maximum memory that a task tracker allows for the  \
		index cache that is used when serving map outputs to reducers.');
HadoopConf.addConf("mapred.merge.recordsBeforeProgress", "10000", 
		'The number of records to process during merge before \
		sending a progress notification to the TaskTracker.');
HadoopConf.addConf("mapred.reduce.slowstart.completed.maps", "0.05", 
		'Fraction of the number of maps in the job which should be  \
		complete before reduces are scheduled for the job.');
HadoopConf.addConf("mapred.task.tracker.task-controller", "org.apache.hadoop.mapred.DefaultTaskController", 
		'TaskController which is used to launch and manage task execution');
HadoopConf.addConf("mapreduce.tasktracker.group", "None", 
		'Expert: Group to which TaskTracker belongs. If \
		LinuxTaskController is configured via mapreduce.tasktracker.taskcontroller, \
		the group owner of the task-controller binary should be same as this group.');
HadoopConf.addConf("mapred.healthChecker.script.path", "None", 
		'Absolute path to the script which is \
		periodicallyrun by the node health monitoring service to determine if \
		the node is healthy or not. If the value of this key is empty or the \
		file does not exist in the location configured here, the node health \
		monitoring service is not started.');
HadoopConf.addConf("mapred.healthChecker.interval", "60000", 
		'Frequency of the node health script to be run, in milliseconds');
HadoopConf.addConf("mapred.healthChecker.script.timeout", "600000", 
		'Time after node health script should be killed if  \
		unresponsive and considered that the script has failed.');
HadoopConf.addConf("mapred.healthChecker.script.args", "None", 
		'List of arguments which are to be passed to  \
		node health script when it is being launched comma seperated.');
HadoopConf.addConf("mapreduce.job.counters.limit", "120", 
		'Limit on the number of counters allowed per job.');