package edu.indiana.d2i.vmm.cluster;

import java.io.InputStream;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;


import edu.indiana.d2i.vmm.cloud.CloudInstanceInfo;
import edu.indiana.d2i.vmm.cluster.util.HTRCStringUtil;

/**
 * Configures and drives a small Hadoop 0.20.2 cluster over SSH.
 *
 * <p>The VM at index 0 of the supplied list is the master (namenode/jobtracker);
 * all remaining VMs are slaves (datanode/tasktracker). {@link #startup()} pushes
 * hostname, /etc/hosts and Hadoop configuration to every node, formats HDFS and
 * starts the cluster; {@link #submitJob} uploads and launches a MapReduce job;
 * {@link #stop()} shuts the cluster down and closes the master session.
 *
 * <p>Not thread-safe: callers must serialize access to one instance.
 */
public class HadoopManager {
	/*
	 * VM image dependencies: credentials and install path baked into the image.
	 * NOTE(review): hard-coded password; prefer key-based SSH authentication.
	 */
	private final String user = "hduser";
	private final String passwd = "hduser";
	// Trailing slash is relied upon everywhere this constant is concatenated.
	private final String HADOOP_HOME = "/home/hduser/hadoop-0.20.2/";

	/** Cluster VMs: index 0 is the master, indices 1..n-1 are slaves. */
	private final CloudInstanceInfo[] vmlist;
	/** SSH session to the master node; opened by startup(), closed by stop(). */
	private SSHSession master = null;

	public static final Log LOG = LogFactory.getLog(HadoopManager.class);

	/** core-site.xml template: Hadoop tmp dir + default FS on the master. */
	private final String core_site_tmpl = "<?xml version=\"1.0\"?> \n"
			+ "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?> \n"
			+ "<configuration> <property>"
			+ "<name>hadoop.tmp.dir</name> <value>/mnt/app/hadoop/tmp</value> "
			+ "<description>A base for other temporary directories.</description></property>"
			+ "<property> <name>fs.default.name</name> <value>hdfs://master:54310</value> "
			+ "<description>The name of the default file system.  A URI whose scheme and "
			+ "authority determine the FileSystem implementation.  The uri's scheme determines "
			+ "the config property (fs.SCHEME.impl) naming the FileSystem implementation class.  "
			+ "The uri's authority is used to determine the host, port, etc. for a filesystem."
			+ "</description></property></configuration>";

	/** mapred-site.xml template: jobtracker address on the master. */
	private final String mapred_site_tmpl = "<?xml version=\"1.0\"?>"
			+ "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?> <configuration> <property>"
			+ "<name>mapred.job.tracker</name> <value>master:54311</value>"
			+ "<description>The host and port that the MapReduce job tracker runs at. "
			+ "If \"local\", then jobs are run in-process as a single map and reduce task. "
			+ "</description></property></configuration>";

	/** hdfs-site.xml template: HDFS block replication factor of 2. */
	private final String hdfs_site_tmpl = "<?xml version=\"1.0\"?>"
			+ "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>"
			+ "<configuration><property> <name>dfs.replication</name> <value>2</value> "
			+ "<description>Default block replication. The actual number of replications "
			+ "can be specified when the file is created. The default is used if replication is "
			+ "not specified in create time. </description></property></configuration>";

	/**
	 * Creates a manager for the given VMs.
	 *
	 * @param vmlist cluster VMs; element 0 is the master, the rest are slaves
	 * @throws IllegalArgumentException if {@code vmlist} is null or empty
	 */
	public HadoopManager(CloudInstanceInfo[] vmlist) {
		if (vmlist == null || vmlist.length == 0) {
			throw new IllegalArgumentException("vmlist must contain at least the master VM");
		}
		this.vmlist = vmlist;
	}

	/**
	 * Checks whether every slave's datanode has registered with the namenode.
	 *
	 * @return true when the datanode count reported by {@code dfsadmin -report}
	 *         equals the number of slaves; false while HDFS is still coming up
	 *         (including when the report does not yet contain the count line)
	 */
	private boolean readyToRunJob() throws Exception {
		// Ask HDFS how many datanodes have registered so far.
		String cmd = HADOOP_HOME + "bin/hadoop dfsadmin -report";
		String screen = master.execCmdReturnScreenOutput(cmd, false).toString();
		int index = HTRCStringUtil.findIndexAfterPattern(screen, "Datanodes available: ");
		if (index == -1) return false; // report not available yet; caller polls
		// The token immediately after the pattern is the datanode count.
		// (Token split is robust against either a space or a newline following it.)
		String count = screen.substring(index).trim().split("\\s+")[0];
		int datanodes = Integer.parseInt(count);
		// Every VM except the master runs a datanode.
		return datanodes == vmlist.length - 1;
	}

	/** Builds /etc/hosts content mapping private IPs to master/slaveN names. */
	private String buildHostsFile() {
		StringBuilder hosts = new StringBuilder();
		hosts.append(vmlist[0].privateIP).append(" master \n");
		for (int i = 1; i < vmlist.length; i++) {
			hosts.append(vmlist[i].privateIP).append(" slave").append(i).append("\n");
		}
		return hosts.toString();
	}

	/** Builds conf/slaves content: one "slaveN" hostname per line. */
	private String buildSlavesFile() {
		StringBuilder slaves = new StringBuilder();
		for (int i = 1; i < vmlist.length; i++) {
			slaves.append("slave").append(i).append("\n");
		}
		return slaves.toString();
	}

	/** Wipes and recreates the Hadoop tmp and log directories on one node. */
	private void prepareNodeDirs(SSHSession session) throws Exception {
		session.execCmd("rm -fr /mnt/app/hadoop/tmp", true);
		session.execCmd("mkdir -p /mnt/app/hadoop/tmp", true);
		session.execCmd("chown hduser:hadoop /mnt/app/hadoop/tmp", true);
		session.execCmd("rm -fr " + HADOOP_HOME + "logs", true);
	}

	/** Opens the master session and pushes hostname, hosts file and Hadoop config. */
	private void configureMaster(String hosts, String slaves) throws Exception {
		master = new SSHSession(vmlist[0].publicIP, user, passwd);
		// Stop any previously running cluster before reconfiguring.
		master.execCmd(HADOOP_HOME + "bin/stop-all.sh", false);
		master.flush();

		master.writeSmallFile(hosts, "/etc/hosts", true);
		master.execCmd("hostname master", true);
		master.writeSmallFile("master\n", HADOOP_HOME + "conf/masters", false);
		master.writeSmallFile(slaves, HADOOP_HOME + "conf/slaves", false);
		master.writeSmallFile(core_site_tmpl, HADOOP_HOME + "conf/core-site.xml", false);
		master.writeSmallFile(mapred_site_tmpl, HADOOP_HOME + "conf/mapred-site.xml", false);
		master.writeSmallFile(hdfs_site_tmpl, HADOOP_HOME + "conf/hdfs-site.xml", false);
		// Pre-seed known_hosts so start-all.sh can ssh to every node non-interactively.
		master.execCmd("rm -f /home/hduser/.ssh/known_hosts", false);
		master.execCmd(
				"ssh-keyscan -f " + HADOOP_HOME + "conf/slaves -t rsa >> ~/.ssh/known_hosts",
				false);
		master.execCmd(
				"ssh-keyscan -f " + HADOOP_HOME + "conf/masters -t rsa >> ~/.ssh/known_hosts",
				false);
		prepareNodeDirs(master);
		master.flush();
		LOG.info("Finish master configuration.");
		System.out.println("Finish master configuration.");
	}

	/** Configures slave {@code i} over a short-lived SSH session (always closed). */
	private void configureSlave(int i, String hosts) throws Exception {
		SSHSession slave = new SSHSession(vmlist[i].publicIP, user, passwd);
		try {
			slave.execCmd("hostname slave" + i, true);
			slave.writeSmallFile(hosts, "/etc/hosts", true);
			// Trailing newline added for consistency with the master's copy.
			slave.writeSmallFile("master\n", HADOOP_HOME + "conf/masters", false);
			slave.writeSmallFile(core_site_tmpl, HADOOP_HOME + "conf/core-site.xml", false);
			slave.writeSmallFile(mapred_site_tmpl, HADOOP_HOME + "conf/mapred-site.xml", false);
			slave.writeSmallFile(hdfs_site_tmpl, HADOOP_HOME + "conf/hdfs-site.xml", false);
			prepareNodeDirs(slave);
		} finally {
			slave.close(); // fix: session was leaked when any command threw
		}
	}

	/**
	 * Configures every node, formats HDFS and starts the Hadoop cluster.
	 *
	 * @throws Exception on any SSH or command failure
	 */
	public void startup() throws Exception {
		String hosts = buildHostsFile();
		String slaves = buildSlavesFile();
		configureMaster(hosts, slaves);

		Thread.sleep(2000); // brief settle time before configuring slaves

		for (int i = 1; i < vmlist.length; i++) {
			configureSlave(i, hosts);
			System.out.println("Finish slave " + i);
		}
		LOG.info("Finish slaves configuration.");
		System.out.println("Finish slaves configuration.");

		// Format HDFS, then bring up the whole cluster from the master.
		master.execCmd(HADOOP_HOME + "bin/hadoop namenode -format", false);
		master.execCmd(HADOOP_HOME + "bin/start-all.sh", false);
		master.flush();
		LOG.info("Format namenode and start the Hadoop cluster.");
	}

	/**
	 * Uploads a file to the master under {@code HADOOP_HOME}.
	 *
	 * @param file     stream with the file contents
	 * @param filename target name (trimmed) relative to HADOOP_HOME
	 */
	public void uploadFile(InputStream file, String filename) throws Exception {
		master.writeFile(file, HADOOP_HOME + filename.trim(), false);
		LOG.info("Upload file " + filename.trim() + " to " + vmlist[0].publicIP + "@" + HADOOP_HOME + filename.trim());
	}

	/**
	 * Uploads the job jar and its properties file, waits for all datanodes to
	 * register, launches the job detached on the master, and returns the
	 * Hadoop job ID of the newly submitted job.
	 *
	 * @param jobFile         stream of the job jar (must be a jar file)
	 * @param jobFileName     jar file name relative to HADOOP_HOME
	 * @param propFile        stream of the job properties file
	 * @param jobPropFileName properties file name relative to HADOOP_HOME
	 * @param args            extra command-line arguments appended to the command
	 * @return the Hadoop job ID (first column of the last line of `job -list all`)
	 */
	public String submitJob(InputStream jobFile, String jobFileName, InputStream propFile, String jobPropFileName, String[] args) throws Exception {
		uploadFile(propFile, jobPropFileName);
		uploadFile(jobFile, jobFileName);

		LOG.debug("check cluster");
		while (!readyToRunJob()) {
			Thread.sleep(5000);
			LOG.info("Waiting for Hadoop...");
		}

		// Snapshot the job list so the new job can be detected afterwards.
		StringBuffer prevScreen = master.execCmdReturnScreenOutput(HADOOP_HOME + "bin/hadoop job -list all", false);

		StringBuilder arglist = new StringBuilder();
		for (String arg : args) {
			arglist.append(arg).append(" ");
		}
		// Trailing '&' detaches the job so the SSH command returns immediately.
		String cmd = String.format(HADOOP_HOME + "bin/hadoop jar %s %s %s &", HADOOP_HOME + jobFileName.trim(), HADOOP_HOME + jobPropFileName.trim(), arglist.toString());
		LOG.debug(cmd);
		master.execCmdWithoutBlocking(cmd, false);
		LOG.info("Execute run job command.");

		// Poll until the job listing changes, i.e. the new job shows up.
		// NOTE(review): the length comparison is fragile — a concurrent job state
		// change of equal length would be missed; parsing job IDs would be safer.
		StringBuffer screen = master.execCmdReturnScreenOutput(HADOOP_HOME + "bin/hadoop job -list all", false);
		while (screen.length() == prevScreen.length()) {
			Thread.sleep(2500);
			screen = master.execCmdReturnScreenOutput(HADOOP_HOME + "bin/hadoop job -list all", false);
		}
		LOG.debug(screen);

		// The job ID is the first whitespace-separated column of the last line.
		String[] lines = screen.toString().split("\n");
		return lines[lines.length - 1].split("\\s+")[0];
	}

	/**
	 * Returns the raw screen output of {@code hadoop job -status <id>}.
	 *
	 * @param clusterJobId the Hadoop job ID as returned by {@link #submitJob}
	 */
	public String getJobStatusFromScreen(String clusterJobId) throws Exception {
		String cmd = HADOOP_HOME + "bin/hadoop job -status " + clusterJobId;
		StringBuffer screen = master.execCmdReturnScreenOutput(cmd, false);
		return screen.toString();
	}

	/**
	 * Returns true when the job has reached a terminal state.
	 * States 2 and 3 correspond to Hadoop's SUCCEEDED and FAILED respectively.
	 */
	public boolean isJobFinishedOrFailed(String clusterJobId) throws Exception {
		int state = getHadoopJobState(clusterJobId);
		return state == 2 || state == 3;
	}

	/**
	 * Parses the numeric job state from {@code hadoop job -list all} output.
	 *
	 * @param clusterJobId the Hadoop job ID to look up
	 * @return the state column following the job ID in the listing
	 * @throws Exception if the job ID does not appear in the listing
	 */
	public int getHadoopJobState(String clusterJobId) throws Exception {
		String cmd = HADOOP_HOME + "bin/hadoop job -list all";
		String screen = master.execCmdReturnScreenOutput(cmd, false).toString();
		int index = screen.indexOf(clusterJobId);
		if (index == -1) throw new Exception("Error to parse screen output while listing jobs status");
		// The state is the second whitespace-separated token on the job's line.
		String sub = screen.substring(index);
		return Integer.valueOf(sub.split("\\s+")[1]);
	}

	/** Stops the Hadoop cluster and closes the master SSH session. */
	public void stop() throws Exception {
		master.execCmd(HADOOP_HOME + "bin/stop-all.sh", false);
		master.close();
	}
}
