#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os,commands,pexpect
from files import *
from auto_ssh import *

class Hadoop:
    """Automates installation of an HA Hadoop (CDH5) cluster.

    The constructor parses a flat key/value config file and pre-builds
    every shell command used later.  The remaining methods push a
    templated Hadoop distribution to each node over scp/SSH (via the
    project-local Auto_ssh helper), then format and start the HDFS and
    YARN daemons.  Progress is appended to ``self.install_log``.
    """

    def __init__(self,hadoopconf):
        """Parse *hadoopconf* line by line and derive all settings/commands.

        NOTE(review): parsing is order-dependent -- ``bash_home`` must
        appear in the file before ``hadoop_version``, and ``HADOOPHOME``
        before the ``*_dir`` keys, because later values are built from
        earlier ones.  Matching is substring-based (``'key' in ...``),
        so config keys must not be substrings of one another.
        """
        for line in File().read(hadoopconf):
            if 'user' in line.split('=')[0]:
                self.user = line.split('=')[-1].strip()
            if 'port' in line.split('=')[0]:
                self.port = int(line.split('=')[-1].strip())
            # password uses ':' as separator so the value itself may contain '='
            if 'password' in line.split(':')[0]:
                self.password = line.split(':')[-1].strip()
            if 'bash_home' in line.split('=')[0]:
                self.bash_home = line.split('=')[-1].strip()
            if 'hadoop_version' in line.split('=')[0]:
                # depends on self.bash_home being parsed already
                self.hadoop_version = line.split('=')[-1].strip()
                self.soft_hadoop = '%s/soft/%s' % (self.bash_home,self.hadoop_version)
                self.install_log = '%s/logs/hadoop_install.log' % (self.bash_home)
            if 'JAVAHOME' in line:
                self.JAVAHOME = line.split('=')[-1].strip()
            if 'HADOOPHOME' in line.split('=')[0]:
                self.HADOOPHOME = line.split('=')[-1].strip()
            if 'hadoop_cluster' in line.split('=')[0]:
                self.hadoop_cluster = line.split('=')[-1].strip().split(',')
            if 'datanode_list' in line.split('=')[0]:
                self.datanode_list = line.split('=')[-1].strip().split(',')
            if 'dfsnameservices' in line:
                self.dfsnameservices = line.split('=')[-1].strip()
            if 'namenode1' in line:
                self.namenode1 = line.split('=')[-1].strip()
            if 'namenode2' in line:
                self.namenode2 = line.split('=')[-1].strip()
            if 'Qjournal' in line:
                # Qjournal is "host1:8485;host2:8485;host3:8485"; strip the
                # port suffixes to obtain the bare journalnode host list.
                self.Qjournal = line.split('=')[-1].strip()
                temp = self.Qjournal.split(':8485;')
                laste = temp[-1].split(':8485')[0]
                temp.remove(temp[-1])
                temp.append(laste)
                self.Qjournal_list = temp
            if 'ha_zookeeper_quorum' in line:
                self.ha_zookeeper_quorum = line.split('=')[-1].strip()
            # The following *_dir keys expand a literal "$HADOOPHOME" prefix
            # in the config file against the parsed self.HADOOPHOME value.
            if 'hadoop_tmp_dir' in line:
                temp = line.split('=')[-1].strip()
                self.hadoop_tmp_dir = self.HADOOPHOME+temp.split('$HADOOPHOME')[-1]
            if 'journalnode_dir' in line:
                temp = line.split('=')[-1].strip()
                self.journalnode_dir = self.HADOOPHOME+temp.split('$HADOOPHOME')[-1]
            if 'dfs_namenode_name_dir' in line:
                temp = line.split('=')[-1].strip()
                self.dfs_namenode_name_dir = self.HADOOPHOME+temp.split('$HADOOPHOME')[-1]
            if 'dfs_datanode_data_dir' in line:
                self.dfs_datanode_data_dir = line.split('=')[-1].strip()
            if 'dfs_ha_fencing_ssh_private_key_files' in line:
                self.dfs_ha_fencing_ssh_private_key_files = line.split('=')[-1].strip()
            if 'dfs_hosts_exclude' in line:
                temp = line.split('=')[-1].strip()
                self.dfs_hosts_exclude = self.HADOOPHOME+temp.split('$HADOOPHOME')[-1]
            if 'mapreduce_jobhistory_address' in line:
                self.mapreduce_jobhistory_address = line.split('=')[-1].strip()
            if 'yarn1' in line:
                self.yarn1 = line.split('=')[-1].strip()
            if 'yarn2' in line.split('=')[0]:
                self.yarn2 = line.split('=')[-1].strip()
            if 'yarn_resourcemanager_cluster_id' in line:
                self.yarn_resourcemanager_cluster_id = line.split('=')[-1].strip()
            if 'yarn_resourcemanager_zk_address' in line:
                self.yarn_resourcemanager_zk_address = line.split('=')[-1].strip()
            if 'yarn_nodemanager_resource_memory_mb' in line:
                self.yarn_nodemanager_resource_memory_mb = line.split('=')[-1].strip()
            if 'yarn_resourcemanager_nodes_exclude_path' in line:
                temp = line.split('=')[-1].strip()
                self.yarn_resourcemanager_nodes_exclude_path = self.HADOOPHOME+temp.split('$HADOOPHOME')[-1]
            if 'yarn_nodemanager_resource_cpu_vcores' in line:
                self.yarn_nodemanager_resource_cpu_vcores = line.split('=')[-1].strip()
            if 'yarn_scheduler_maximum_allocation_vcores' in line:
                self.yarn_scheduler_maximum_allocation_vcores = line.split('=')[-1].strip()
        # Log separator written between per-node sections of the install log.
        self.sign = '*' * 30  + '\n'
        self.hadoop_home = '/home/hadoop/cdh5/hadoop'
        self.hadoop_latest = '%s/latest' %(self.hadoop_home)
        # Drop the demo distribution's stock config, then copy in our templates.
        self.soft_confrmr = 'rm -rf %s/etc/hadoop/{core-site.xml,hadoop-env.sh,hdfs-site.xml,mapred-env.sh,mapred-site.xml,yarn-env.sh,yarn-site.xml}'%(self.soft_hadoop)
        self.democonfig = 'cp -rfp %s/soft/hadoop_demo/conf/* %s/etc/hadoop/' % (self.bash_home,self.soft_hadoop)
        self.ln = 'ln -s %s/%s %s/latest' %(self.hadoop_home,self.hadoop_version,self.hadoop_home)
        self.chown = 'chown hadoop:hadoop -R %s ' %(self.hadoop_home)
        self.mkdir_hadoop_home = 'test -d %s || (mkdir -p %s;chown hadoop:hadoop -R %s)' % (self.hadoop_home,self.hadoop_home,self.hadoop_home)
        self.rmr = 'rm -rf %s' % (self.hadoop_home)
        ## hadoop format commands (run remotely as the hadoop user) ##
        self.format_hdfs = "su - hadoop -c 'cd %s;%s/bin/hdfs namenode -format >> namenode_format.log' " % (self.hadoop_latest,self.hadoop_latest)
        self.format_zkfc = "su - hadoop -c 'cd %s;%s/bin/hdfs zkfc -formatZK > zkfc_format.log' " % (self.hadoop_latest,self.hadoop_latest)
        self.standby = "su - hadoop -c 'cd %s;%s/bin/hdfs namenode -bootstrapStandby >> Standby.log' " % (self.hadoop_latest,self.hadoop_latest)
        # Daemon start commands.
        self.start_nn = "su - hadoop -c '%s/sbin/hadoop-daemon.sh start namenode >/dev/null 2>&1' " %(self.hadoop_latest)
        self.start_zkfc = "su - hadoop -c '%s/sbin/hadoop-daemon.sh start zkfc >/dev/null 2>&1' " %(self.hadoop_latest)
        self.start_jn = "su - hadoop -c '%s/sbin/hadoop-daemon.sh start journalnode >/dev/null 2>&1' " %(self.hadoop_latest)
        self.start_dn = "su - hadoop -c '%s/sbin/hadoop-daemon.sh start datanode >/dev/null 2>&1' " %(self.hadoop_latest)
        self.start_rs = "su - hadoop -c '%s/sbin/yarn-daemon.sh start resourcemanager >/dev/null 2>&1' " %(self.hadoop_latest)
        self.start_nm = "su - hadoop -c '%s/sbin/yarn-daemon.sh start nodemanager >/dev/null 2>&1' " %(self.hadoop_latest)
        self.start_jobhistory = "su - hadoop -c '%s/sbin/mr-jobhistory-daemon.sh start historyserver >/dev/null 2>&1' " %(self.hadoop_latest)
        # Persist each start command into /etc/rc.local so daemons survive reboot.
        self.auto_start_nn = '''echo "%s" >>  /etc/rc.local''' % self.start_nn
        self.auto_start_zkfc = '''echo "%s" >>  /etc/rc.local''' % self.start_zkfc
        self.auto_start_jn = '''echo "%s" >>  /etc/rc.local''' % self.start_jn
        self.auto_start_dn = '''echo "%s" >>  /etc/rc.local''' % self.start_dn
        self.auto_start_rs = '''echo "%s" >>  /etc/rc.local''' % self.start_rs
        self.auto_start_nm = '''echo "%s" >>  /etc/rc.local''' % self.start_nm
        self.auto_start_jh = '''echo "%s" >>  /etc/rc.local''' % self.start_jobhistory
        # FIX: this line was indented with a TAB (SyntaxError under Python 3,
        # fails 'python -tt' under Python 2); normalized to spaces.
        self.tail = 'tail -1 /etc/rc.local'
        # Paths of the config templates inside the unpacked distribution.
        self.hadoop_env = '%s/etc/hadoop/hadoop-env.sh' % (self.soft_hadoop)
        self.yarn_env = '%s/etc/hadoop/yarn-env.sh'     % (self.soft_hadoop)
        self.mapred_env = '%s/etc/hadoop/mapred-env.sh' % (self.soft_hadoop)
        self.core_site = '%s/etc/hadoop/core-site.xml'  % (self.soft_hadoop)
        self.hdfs_site = '%s/etc/hadoop/hdfs-site.xml'  % (self.soft_hadoop)
        self.yarn_site = '%s/etc/hadoop/yarn-site.xml'  % (self.soft_hadoop)
        self.mapred_site = '%s/etc/hadoop/mapred-site.xml' % (self.soft_hadoop)

    def init(self):
        """Unpack the distribution (if absent) and fill in all config templates.

        File().fileinput replaces a placeholder token with its concrete
        value inside the named config file.
        """
        if not os.path.exists(self.soft_hadoop):
            tar = '/usr/bin/tar zxf %s.tar.gz -C %s/soft/' %(self.soft_hadoop,self.bash_home)
            commands.getoutput(tar)
            commands.getoutput(self.soft_confrmr)
            commands.getoutput(self.democonfig)
        # hadoop-env.sh / mapred-env.sh / yarn-env.sh
        File().fileinput(self.hadoop_env,'JAVAHOME',self.JAVAHOME)
        File().fileinput(self.hadoop_env,'HADOOPHOME',self.HADOOPHOME)
        File().fileinput(self.mapred_env,'JAVAHOME',self.JAVAHOME)
        File().fileinput(self.yarn_env,'JAVAHOME',self.JAVAHOME)

        # core-site.xml
        File().fileinput(self.core_site,'ha_zookeeper_quorum',self.ha_zookeeper_quorum)
        File().fileinput(self.core_site,'hadoop_tmp_dir',self.hadoop_tmp_dir)
        File().fileinput(self.core_site,'dfsnameservices',self.dfsnameservices)

        # hdfs-site.xml
        File().fileinput(self.hdfs_site,'dfsnameservices',self.dfsnameservices)
        File().fileinput(self.hdfs_site,'namenode1',self.namenode1)
        File().fileinput(self.hdfs_site,'namenode2',self.namenode2)
        File().fileinput(self.hdfs_site,'Qjournal',self.Qjournal)
        File().fileinput(self.hdfs_site,'journalnode_dir',self.journalnode_dir)
        File().fileinput(self.hdfs_site,'dfs_namenode_name_dir',self.dfs_namenode_name_dir)
        File().fileinput(self.hdfs_site,'dfs_datanode_data_dir',self.dfs_datanode_data_dir)
        File().fileinput(self.hdfs_site,'dfs_ha_fencing_ssh_private_key_files',self.dfs_ha_fencing_ssh_private_key_files)
        File().fileinput(self.hdfs_site,'dfs_hosts_exclude',self.dfs_hosts_exclude)

        # mapred-site.xml
        File().fileinput(self.mapred_site,'mapreduce_jobhistory_address',self.mapreduce_jobhistory_address)

        # yarn-site.xml
        File().fileinput(self.yarn_site,'yarn1',self.yarn1)
        File().fileinput(self.yarn_site,'yarn2',self.yarn2)
        File().fileinput(self.yarn_site,'yarn_resourcemanager_cluster_id',self.yarn_resourcemanager_cluster_id)
        File().fileinput(self.yarn_site,'yarn_resourcemanager_zk_address',self.yarn_resourcemanager_zk_address)
        File().fileinput(self.yarn_site,'yarn_nodemanager_resource_memory_mb',self.yarn_nodemanager_resource_memory_mb)
        File().fileinput(self.yarn_site,'yarn_resourcemanager_nodes_exclude_path',self.yarn_resourcemanager_nodes_exclude_path)
        File().fileinput(self.yarn_site,'yarn_nodemanager_resource_cpu_vcores',self.yarn_nodemanager_resource_cpu_vcores)
        File().fileinput(self.yarn_site,'yarn_scheduler_maximum_allocation_vcores',self.yarn_scheduler_maximum_allocation_vcores)
        File().fileinput(self.yarn_site,'mapreduce_jobhistory_address',self.mapreduce_jobhistory_address)

    def rsync(self):
        """Copy the configured distribution to every cluster node and symlink 'latest'."""
        for node in self.hadoop_cluster:
            File().add(self.install_log,node+'\n')
            Auto_ssh().execute_passwd(node,self.port,self.user,self.password,self.mkdir_hadoop_home)
            scp = 'scp -r %s root@%s:%s' %(self.soft_hadoop,node,self.hadoop_home)
            commands.getoutput(scp)
            Auto_ssh().execute_passwd(node,self.port,self.user,self.password,self.ln)
            Auto_ssh().execute_passwd(node,self.port,self.user,self.password,self.chown)
            for line in Auto_ssh().execute_passwd(node,self.port,self.user,self.password,'ls -l /home/hadoop/cdh5/hadoop'):
                File().add(self.install_log,line)
            File().add(self.install_log,self.sign)

    def startjn(self):
        """Create the journal dir and start the JournalNode on each Qjournal host."""
        mkdir_jndir = 'test -d %s || (mkdir -p %s;chown hadoop:hadoop -R %s)' %(self.journalnode_dir,self.journalnode_dir,self.journalnode_dir)
        jndirls = 'ls -dl %s' %(self.journalnode_dir)
        status_jn = "su - hadoop -c '%s/bin/jps | grep JournalNode'" % (self.JAVAHOME)
        for jn in  self.Qjournal_list:
            Auto_ssh().execute_passwd(jn,self.port,self.user,self.password,mkdir_jndir)
            Auto_ssh().execute_passwd(jn,self.port,self.user,self.password,self.start_jn)
            Auto_ssh().execute_passwd(jn,self.port,self.user,self.password,self.auto_start_jn)
            jndirlog = '\ncreate Jndir\t%s\n%sJournalNode status :%s\n' %(jn,(Auto_ssh().execute_passwd(jn,self.port,self.user,self.password,jndirls)[0]),(Auto_ssh().execute_passwd(jn,self.port,self.user,self.password,status_jn)[0]))
            File().add(self.install_log,jndirlog)
        File().add(self.install_log,self.sign)

    def startnn(self):
        """Format and start the active NameNode (nn1), then bootstrap the standby (nn2)."""
        mkdir_nndir = 'test -d %s || (mkdir -p %s;chown hadoop:hadoop -R %s)' %(self.dfs_namenode_name_dir,self.dfs_namenode_name_dir,self.dfs_namenode_name_dir)
        nndirls = 'ls -dl %s' %(self.dfs_namenode_name_dir)
        mkdir_exclude = "su - hadoop -c 'echo > %s'"%(self.dfs_hosts_exclude)
        # nn1: format ZKFC + HDFS, start daemons, persist auto-start.
        Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,mkdir_nndir)
        Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,mkdir_exclude)
        Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,self.format_zkfc)
        Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,self.start_zkfc)
        Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,self.format_hdfs)
        Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,self.start_nn)
        Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,self.auto_start_zkfc)
        Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,self.auto_start_nn)
        status = "su - hadoop -c '%s/bin/jps|grep -v Jps'" % (self.JAVAHOME)
        File().add(self.install_log,self.namenode1+'\n')
        for stat in Auto_ssh().execute_passwd(self.namenode1,self.port,self.user,self.password,status):
            File().add(self.install_log,stat)
        File().add(self.install_log,self.sign)
        # nn2: bootstrap standby from nn1, start daemons, persist auto-start.
        File().add(self.install_log,'\n'+self.namenode2+'\n')
        Auto_ssh().execute_passwd(self.namenode2,self.port,self.user,self.password,mkdir_nndir)
        Auto_ssh().execute_passwd(self.namenode2,self.port,self.user,self.password,mkdir_exclude)
        Auto_ssh().execute_passwd(self.namenode2,self.port,self.user,self.password,self.start_zkfc)
        Auto_ssh().execute_passwd(self.namenode2,self.port,self.user,self.password,self.standby)
        Auto_ssh().execute_passwd(self.namenode2,self.port,self.user,self.password,self.start_nn)
        # FIX: these two calls previously targeted self.namenode1, duplicating
        # nn1's rc.local entries and leaving nn2 without auto-start on reboot.
        Auto_ssh().execute_passwd(self.namenode2,self.port,self.user,self.password,self.auto_start_zkfc)
        Auto_ssh().execute_passwd(self.namenode2,self.port,self.user,self.password,self.auto_start_nn)
        status = "su - hadoop -c '%s/bin/jps|grep -v Jps'" % (self.JAVAHOME)
        for stat in Auto_ssh().execute_passwd(self.namenode2,self.port,self.user,self.password,status):
            File().add(self.install_log,stat)
        File().add(self.install_log,self.sign)

    def startdn(self):
        """Create each data directory and start the DataNode on every datanode host."""
        for node in self.datanode_list:
            File().add(self.install_log,'\n'+node+'\n')
            for datadir in self.dfs_datanode_data_dir.split(','):
                mkdir_datadir = 'test -d %s || mkdir -p %s' %(datadir,datadir)
                chown_dir = 'chown hadoop:hadoop %s' % (datadir)
                datadir = "\ncreate datadir %s : \n%s" %(datadir,chown_dir)
                Auto_ssh().execute_passwd(node,self.port,self.user,self.password,mkdir_datadir)
                Auto_ssh().execute_passwd(node,self.port,self.user,self.password,chown_dir)
            Auto_ssh().execute_passwd(node,self.port,self.user,self.password,self.start_dn)
            Auto_ssh().execute_passwd(node,self.port,self.user,self.password,self.auto_start_dn)
            for dirs in Auto_ssh().execute_passwd(node,self.port,self.user,self.password,'ls -ld /data*/hdfs'):
                File().add(self.install_log,dirs)
            File().add(self.install_log,self.sign)

    def startrs(self):
        """Start the two HA ResourceManagers (yarn1 active, yarn2 standby)."""
        File().add(self.install_log,self.sign)
        File().add(self.install_log,'\nYarn cluster install:\n')
        rsmkdir_exclude = "su - hadoop -c 'echo > %s'"%(self.yarn_resourcemanager_nodes_exclude_path)
        Auto_ssh().execute_passwd(self.yarn1,self.port,self.user,self.password,rsmkdir_exclude)
        Auto_ssh().execute_passwd(self.yarn1,self.port,self.user,self.password,self.start_rs)
        Auto_ssh().execute_passwd(self.yarn2,self.port,self.user,self.password,rsmkdir_exclude)
        Auto_ssh().execute_passwd(self.yarn2,self.port,self.user,self.password,self.start_rs)
        Auto_ssh().execute_passwd(self.yarn2,self.port,self.user,self.password,self.auto_start_rs)
        status_rs = "su - hadoop -c '%s/bin/jps|grep -v Jps|grep ResourceManager'" % (self.JAVAHOME)
        File().add(self.install_log,'\n'+self.yarn1+'\n')
        File().add(self.install_log,(Auto_ssh().execute_passwd(self.yarn1,self.port,self.user,self.password,status_rs)[0]))
        File().add(self.install_log,'\n'+self.yarn2+'\n')
        File().add(self.install_log,(Auto_ssh().execute_passwd(self.yarn2,self.port,self.user,self.password,status_rs)[0]))

    def startnm(self):
        """Start a NodeManager on every datanode, then the JobHistory server on yarn1."""
        File().add(self.install_log,self.sign)
        status_nm = "su - hadoop -c '%s/bin/jps|grep -v Jps|grep NodeManager'" % (self.JAVAHOME)
        File().add(self.install_log,'\nNodemamanger install:\n')
        for node in self.datanode_list:
            Auto_ssh().execute_passwd(node,self.port,self.user,self.password,self.start_nm)
            Auto_ssh().execute_passwd(node,self.port,self.user,self.password,self.auto_start_nm)
            File().add(self.install_log,'\n'+node+'\n')
            File().add(self.install_log,Auto_ssh().execute_passwd(node,self.port,self.user,self.password,status_nm)[0])
        File().add(self.install_log,self.sign)
        Auto_ssh().execute_passwd(self.yarn1,self.port,self.user,self.password,self.start_jobhistory)
        File().add(self.install_log,self.yarn1+'\n')
        status_jh = "su - hadoop -c '%s/bin/jps|grep -v Jps|grep JobHistoryServer'" % (self.JAVAHOME)
        File().add(self.install_log,(Auto_ssh().execute_passwd(self.yarn1,self.port,self.user,self.password,status_jh)[0]))

    def template(self):
        """Archive the configured distribution into the FTP template directory."""
        descdir = '/var/ftp/hrsjw1/soft/%s' % (self.dfsnameservices)
        if not os.path.exists(descdir):
            commands.getoutput('mkdir -p %s' % (descdir))
            mv = 'mv %s %s/' % (self.soft_hadoop,descdir)
            commands.getoutput(mv)
    
class main():
    # Entry-point shim: instantiating this class (see the __main__ guard
    # below) is the "run" hook.  The class body executes at definition time,
    # so uncommenting a stage line below runs it when the module is defined;
    # instantiation itself performs no work.
    # Path to the installer's key/value config file parsed by Hadoop.__init__.
    files = '/usr/local/src/python_intall/conf/hadoop.conf'
    # Installation stages, in order -- uncomment the one(s) to execute:
    #Hadoop(files)
    #Hadoop(files).init()
    #Hadoop(files).rsync()
    #Hadoop(files).startjn()
    #Hadoop(files).startnn()
    #Hadoop(files).startdn()
    #Hadoop(files).startrs()
    #Hadoop(files).startnm()
    #Hadoop(files).template()


if __name__ == "__main__":
    main()
