/*
 * Copyright (C) 2012 CNL - ECE Dept - National University of Singapore
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
package sg.nus.ece.cnl.veracity.hadoop;

import java.io.File;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.log4j.Logger;
import sg.nus.ece.cnl.veracity.access.SshUserCredentials;
import sg.nus.ece.cnl.veracity.access.TargetServer;
import sg.nus.ece.cnl.veracity.configuration.VeracityDefaultValues;
import sg.nus.ece.cnl.veracity.execution.RemoteCommandExecutor;
import sg.nus.ece.cnl.veracity.hadoop.generator.ConfigurationGenerator;
import sg.nus.ece.cnl.veracity.transfer.ScpTo;

/**
 *
 * @author tram
 */
/**
 * Starts the operation of a Hadoop cluster: generates the cluster
 * configuration files into a local temporary directory, copies them to a
 * temporary directory on the target server, executes a start script there and
 * finally removes the remote temporary directory.
 * <p>
 * Implemented as a lazily-initialized singleton; obtain it via
 * {@link #getInstance()}.
 *
 * @author tram
 */
public class ClusterStarter {

    private static final Logger log = Logger.getLogger(ClusterStarter.class);
    // Lazily created singleton instance; guarded by getInstance() synchronization.
    private static ClusterStarter instance;

    private ClusterStarter() {
    }

    /**
     * Returns the shared {@code ClusterStarter} instance, creating it on first
     * use. Synchronized so lazy initialization is safe when invoked from
     * multiple threads.
     *
     * @return the singleton instance, never {@code null}
     */
    public static synchronized ClusterStarter getInstance() {

        if (instance == null) {
            instance = new ClusterStarter();
        }
        return instance;
    }

    /**
     * Starts Hadoop services on the Hadoop vm instances of the cluster.
     * <p>
     * Writes the generated configuration (masters, slaves, core-site.xml,
     * mapred-site.xml and a start script) to a fresh local temp directory,
     * copies each file to a temporary directory on the target server, runs the
     * start script remotely and cleans the remote temporary directory up
     * afterwards. Failures are logged, not rethrown.
     *
     * @param targetServer target server where the Hadoop vm instances are deployed
     * @param userCredentials user credentials used for this operation
     * @param vmInstances a map containing all vm instance names and IP
     * addresses, including master and slave nodes
     */
    public void startCluster(TargetServer targetServer, SshUserCredentials userCredentials, Map<String, String> vmInstances) {

        String masternode = getMasterNode(vmInstances);
        List<String> slaveNodes = getSlaveNodes(vmInstances);

        if (masternode == null) {
            // Without a master node the generated configuration would be useless.
            log.error("Cannot start Hadoop cluster: no \"Hadoop-Master\" instance found among the vm instances.");
            return;
        }

        try {
            log.info("Creating configuration for hadoop cluster ...");

            String coreSiteConf = ConfigurationGenerator.getInstance().getCoreSite(masternode);
            String mapredSiteConf = ConfigurationGenerator.getInstance().getMapredSite(masternode);

            // Write all conf files into a fresh local temporary directory.
            String tempDirName = System.getProperty("java.io.tmpdir") + File.separator + "hadoop-" + System.nanoTime();
            File tempDir = new File(tempDirName);
            if (!tempDir.exists()) {
                tempDir.mkdirs();
                tempDir.setWritable(true, false);
            }

            // masters file: the single master node IP
            File masterNodesFile = new File(tempDir, "masters");
            writeFile(masterNodesFile, masternode);

            // slaves file: one slave IP per line (Hadoop conf/slaves format)
            StringBuilder slaves = new StringBuilder();
            for (String slave : slaveNodes) {
                slaves.append(slave).append("\n");
            }
            File slaveNodesFile = new File(tempDir, "slaves");
            writeFile(slaveNodesFile, slaves.toString());

            // core-site.xml file
            File coreSiteConfFile = new File(tempDir, "core-site.xml");
            writeFile(coreSiteConfFile, coreSiteConf);

            // mapred-site.xml
            File mapredSiteConfFile = new File(tempDir, "mapred-site.xml");
            writeFile(mapredSiteConfFile, mapredSiteConf);

            // create a temporary directory on the xen server for the conf files
            String remoteDirName = "/tmp/hadoopconf-" + System.nanoTime();
            RemoteCommandExecutor.getInstance().exec(targetServer, userCredentials, "mkdir " + remoteDirName);

            // create the start script, pointing Hadoop at the remote conf directory
            String startScript = ConfigurationGenerator.getInstance().
                    getStartScript(VeracityDefaultValues.getDefaultHadoopHome(),
                    VeracityDefaultValues.getDefaultHadoopUserLogin(), remoteDirName);

            File startScriptFile = new File(tempDir, "start-script.sh");
            writeFile(startScriptFile, startScript);

            // copy all generated files to the remote temporary directory
            File remoteDir = new File(remoteDirName);
            ScpTo.getInstance().copyTo(targetServer, userCredentials, masterNodesFile.getAbsolutePath(), remoteDir);
            ScpTo.getInstance().copyTo(targetServer, userCredentials, slaveNodesFile.getAbsolutePath(), remoteDir);
            ScpTo.getInstance().copyTo(targetServer, userCredentials, coreSiteConfFile.getAbsolutePath(), remoteDir);
            ScpTo.getInstance().copyTo(targetServer, userCredentials, mapredSiteConfFile.getAbsolutePath(), remoteDir);
            ScpTo.getInstance().copyTo(targetServer, userCredentials, startScriptFile.getAbsolutePath(), remoteDir);

            log.info("Starting Hadoop cluster....");

            RemoteCommandExecutor.getInstance().exec(targetServer, userCredentials, "sh " + remoteDirName + "/start-script.sh");
            log.info("Hadoop Cluster has been successfully started. Please connect to master node: \"" + masternode + "\" to execute your job.");

            // clean temporary files on remote xen server
            RemoteCommandExecutor.getInstance().exec(targetServer, userCredentials, "rm -fr " + remoteDirName);

        } catch (java.io.IOException ex) {
            // Pass the exception itself so log4j records the full stack trace.
            log.error("Cannot save hadoop configuration to files", ex);
        } catch (java.lang.Exception ex) {
            log.error("Cannot generate configuration file for Hadoop Cluster", ex);
        }
    }

    /**
     * Writes {@code content} to {@code file}, always closing the writer — even
     * when the write fails — so no file handle is leaked.
     *
     * @param file destination file, created or truncated
     * @param content text to write
     * @throws java.io.IOException if the file cannot be created or written
     */
    private void writeFile(File file, String content) throws java.io.IOException {
        FileWriter writer = new FileWriter(file);
        try {
            writer.write(content);
        } finally {
            writer.close();
        }
    }

    /**
     * Get the IP of the master node.
     *
     * @param vmInstances map of VM instances (name to IP address)
     * @return IP address of the instance whose name contains "Hadoop-Master",
     * or {@code null} when no such instance exists
     */
    private String getMasterNode(Map<String, String> vmInstances) {

        for (Entry<String, String> entry : vmInstances.entrySet()) {
            if (entry.getKey().contains("Hadoop-Master")) {
                return entry.getValue();
            }
        }
        return null;
    }

    /**
     * Get all slave node IPs.
     *
     * @param vmInstances map of VM instances (name to IP address)
     * @return IPs of every instance whose name contains "Hadoop-Slave";
     * empty list when there are none
     */
    private List<String> getSlaveNodes(Map<String, String> vmInstances) {

        List<String> ipList = new ArrayList<String>();
        for (Entry<String, String> entry : vmInstances.entrySet()) {
            if (entry.getKey().contains("Hadoop-Slave")) {
                ipList.add(entry.getValue());
            }
        }

        return ipList;
    }
}
