'''
Created on Apr 23, 2012

@author: root
'''
from Base import Base
from Disk import Disk
from FileOps import FileOps
#from Filesystem import Filesystem
from Module import Module
from Node import Node
from Project import Project
from Role import Role
from Software2 import Software2
#from User import User
from XML_File import XML_File
from NodeInfo import NodeInfo
import Queue
import csv
import datetime
import os
#import tarfile
import threading
import time
#import yum

class Hadoop(Base):
    '''
    Installer/configurator for a GPHD Hadoop cluster.

    Driven by an external caller, it:
      * LoadNodes        -- parses the cluster node config CSV into Node objects
      * BuildProject     -- assembles a Project of Roles/Modules from the
                            hadoop project definition file
      * CheckConfig      -- sanity-checks role counts and asks the user to confirm
      * Modify*/Push*    -- generates and distributes Hadoop config files
      * SetupEnv / SetupPermissions -- per-node environment and ownership fixes

    Inherits logging, SSH and user helpers (LogInfo, SSHExec, SSHReadFile,
    SetUsers, installUser/installPW/installSudo, ...) from Base.
    '''

    # NOTE(review): these are class-level attributes, so the mutable ones
    # (Nodes, Users, returnQueue2) are shared across ALL Hadoop instances --
    # confirm only a single instance is ever created per run.
    binName = ""        # filename component of the software bundle path (set in __init__)
    binDir = ""         # directory component of the software bundle path (set in __init__)
    binPath = ""        # full software bundle path handed to __init__ (from the -g option)
    projectName = "hadoop"
    sw =""
    Nodes = []          # Node objects appended by LoadNodes()
    Users = []
    clusterName = ""
    project = None      # Project built by BuildProject()
    remoteProjectDir = "/tmp/pachyderm/hadoop"
    goldDir = "./conf/config_files/gold/hadoop"        # master ("gold") config copies
    backupDir = "./conf/config_files/previous/hadoop"  # timestamped backups of prior configs
    configDir = "./conf/config_files"
    installConfigDir = "/usr/lib/gphd/hadoop/conf"     # config dir on the cluster nodes
    fileops = None      # FileOps helper (set in __init__)
    nodeInfo = None     # NodeInfo built in CheckConfig()
    returnQueue2 = Queue.Queue()  # (hostName, status, results) tuples from worker threads


    def BuildProject(self,Nodes):
        '''
        Build self.project from the hadoop project definition file.

        Registers one Role per Hadoop personality, then reads
        ./conf/project/gphd-hadoop-1.1.def (CSV columns: personality,
        installorder, rpm) and, for each row, attaches a Module to every
        Role whose personality matches, for every entry of
        Software2.swFilenames that contains the rpm string.

        Returns -1 on failure; falls through (implicit None) on success.
        '''
        modCnt = 0
        ''' Define the Project and Add Appropriate Roles '''
        self.LogInfo("Building Hadoop Project for Installation")
        print "    * Build Hadoop Project for Installation"
        projectConfig = "./conf/project/gphd-hadoop-1.1.def"


        self.project = Project(self.projectName,"version",self.binPath)
        # Personality codes: '*' = every node, n/d/t/j/s = specific daemons.
        self.project.AddRole(Role("Hadoop","*"))
        self.project.AddRole(Role("Namenode","n"))
        self.project.AddRole(Role("Datanode","d"))
        self.project.AddRole(Role("Tasktracker","t"))
        self.project.AddRole(Role("Jobtracker","j"))
        self.project.AddRole(Role("SecondaryNamenode","s"))


        self.LogInfo("Reading Hadoop Project Definition File")

        try: 
            for row in csv.DictReader(open(projectConfig), 'personality installorder rpm'.split()):
                if (len(row) == 3):
                    personality = row['personality']
                    installOrder = int(row['installorder'])
                    rpm = row['rpm']
                    self.LogInfo(projectConfig+":"+str(row))
                else:
                    # NOTE(review): on a malformed row the role loop below
                    # still runs with personality/installOrder/rpm left over
                    # from the previous row (unbound NameError if the very
                    # first row is malformed) -- confirm intended.
                    self.LogError("Incorrect Number of Parameters in Hadoop Project Definition File.")
                    print "    * ERROR: Incorrect Number of Parameters in Hadoop Project Definition File."

                # Attach the rpm to every role whose personality matches.
                for role in self.project.Roles:
                    if (str(personality) == str(role.personality)):
                        for fileName in Software2.swFilenames:
                            if (str(rpm) in str(fileName)):
                                role.AddModule(Module(fileName,True,installOrder))
                                self.LogInfo("Add Module: "+str(fileName))
                                modCnt += 1


            self.LogInfo("Completed Reading Hadoop Project Definition File")
        except Exception, err:
            self.LogError("Problem Loading Hadoop Project Definition File")
            return -1


    def CheckConfig(self, Nodes, Users):
        '''
        Validate the requested role layout and ask the user to confirm it.

        Prints/logs an "installation verification report" listing which
        hosts hold each Hadoop role, enforces the role-count limits below
        (exactly one NameNode and one JobTracker, at least one DataNode and
        one TaskTracker), and prompts for confirmation when the layout is
        valid.

        Returns 0 when the layout is valid and the user accepts it,
        -1 otherwise.
        '''
        # Allowed role counts for a valid cluster layout.
        namenodeMax = 1
        namenodeMin = 1   # FIX FOR ISILON
        jobtrackerMax = 1
        jobtrackerMin = 1
        datanodeMin = 1 # FIX FOR ISILON
        tasktrackerMin = 1
        self.nodeInfo = NodeInfo(Nodes, Users)
        namenodeSTR = ""
        datanodesSTR = ""
        jobtrackerSTR = ""
        tasktrackersSTR = ""
        secondarynnSTR = ""

        cfgStatus = 0

        # Each *STR below is built as ",a,b,..."; the trailing [1:] slice
        # drops the leading comma.
        namenode = self.nodeInfo.GetNameNode()
        for nn in namenode:
            namenodeSTR = namenodeSTR + "," + str(nn.hostName)
        namenodeSTR = namenodeSTR[1:]

        jobtracker = self.nodeInfo.GetJobTracker()
        for jt in jobtracker:
            jobtrackerSTR = jobtrackerSTR + "," + str(jt.hostName)
        jobtrackerSTR = jobtrackerSTR[1:]

        datanodes = self.nodeInfo.GetDataNodes()
        for dn in datanodes:
            datanodesSTR = datanodesSTR + "," + str(dn.hostName)
        datanodesSTR = datanodesSTR[1:]

        tasktrackers = self.nodeInfo.GetTaskTrackers()
        for tt in tasktrackers:
            tasktrackersSTR = tasktrackersSTR + "," + str(tt.hostName)
        tasktrackersSTR = tasktrackersSTR[1:]

        secondarynn = self.nodeInfo.GetSecondaryNameNodes()
        for nn in secondarynn:
            secondarynnSTR = secondarynnSTR + "," + str(nn.hostName)
        secondarynnSTR = secondarynnSTR[1:]

        self.LogInfo("Installation Verification Report")
        self.LogInfo("-----------------------------------------")
        self.LogInfo("Project:         " + str(self.projectName))
        self.LogInfo("Namenode(s):     " + str(namenodeSTR))
        self.LogInfo("JobTracker(s):   " + str(jobtrackerSTR))
        self.LogInfo("Datanode(s):     " + str(datanodesSTR))
        self.LogInfo("TaskTracker(s):  " + str(tasktrackersSTR))
        self.LogInfo("Secondary NN(s): " + str(secondarynnSTR))
        print "---------------------------------------------------"
        print "           INSTALLATION VERIFICATION REPORT    "
        print "---------------------------------------------------"
        print "* Project:         " + str(self.projectName)
        print "* Namenode(s):     " + str(namenodeSTR)
        if (len(namenode) > namenodeMax) or (len(namenode) < namenodeMin):
            print "\n**** ERROR:  Incorrect Namenode Quantity Specified\n"
            cfgStatus = -1
        print "* JobTracker(s):   " + str(jobtrackerSTR)
        if (len(jobtracker) > jobtrackerMax) or (len(jobtracker) < jobtrackerMin):
            print "\n**** ERROR:  Incorrect JobTracker Quantity Specified\n"
            cfgStatus = -1
        print "* Datanode(s):     " + str(datanodesSTR)
        if (len(datanodes) < datanodeMin):
            print "\n**** ERROR:  Incorrect DataNode Quantity Specified\n"
            cfgStatus = -1
        print "* TaskTracker(s):  " + str(tasktrackersSTR)
        if (len(tasktrackers) < tasktrackerMin):
            print "\n**** ERROR:  Incorrect TaskTracker Quantity Specified\n"
            cfgStatus = -1
        print "* Secondary NN(s): " + str(secondarynnSTR)
        if (cfgStatus == 0):
            print "---------------------------------------------------"
            choice = self.query_yes_no("Is this Configuration Correct?", "yes")
            print "---------------------------------------------------"
            if choice:
                return 0
            else:
                return -1
        else:
            # Invalid layout: pause so the operator can read the errors.
            time.sleep(4)
            return -1

    def LoadNodes(self,configPath):
        '''
        Load cluster nodes from the CSV config file at configPath.

        Expected columns: hostname, ipaddress, personalities, fstype,
        disks (colon-separated device names, e.g. "sdb:sdc").
        Appends a Node (with one Disk per device) to self.Nodes per row.

        Returns 0 on success, -1 on a malformed row or read failure.
        '''
        try: 
            for row in csv.DictReader(open(configPath), 'hostname ipaddress personalities fstype disks'.split()):
                if (len(row) == 5):
                    hostname = row['hostname'] 
                    ipAddress = row['ipaddress']  
                    # Every node implicitly gets the '*' (all-nodes) personality.
                    personalities = "*" + row['personalities']
                    fstype = row['fstype']
                    node = Node(hostname,ipAddress,personalities)
                    for disk in str(row['disks']).split(":"):
                        node.AddDisk(Disk("/dev/"+str(disk),0,fstype,0,0))
                    self.Nodes.append(node)
                    self.LogInfo(row)
                else:
                    self.LogError("Incorrect Number of Parameters in Config File.")

                    return -1
            self.LogInfo("Configuration File Loaded")
            return 0   
        except Exception, err:
            self.LogError("Problem Loading Configuration File")
            return -1


    def BuildClusterDefinitionFile(self,fileName):

        '''
        Rebuild the gold "masters" or "slaves" cluster definition file.

        Backs up the current gold copy to backupDir with a timestamp
        suffix, then regenerates it from the node layout: "slaves" lists
        every TaskTracker host, "masters" lists every Secondary NameNode
        host (one hostname per line).

        Returns 0 on success, -1 if the backup or rewrite fails.
        '''
        filePath = self.goldDir+"/"+fileName
        prevFile = str(self.backupDir)+"/"+fileName+"."+datetime.datetime.now().strftime("%Y%m%d-%H:%M")
        print "    * Building "+fileName+" file for Cluster"
        try:
            # Preserve the prior gold copy before overwriting it below.
            self.fileops.LocalPush(filePath,prevFile)
            self.LogInfo ("Backup of Gold "+fileName+" Complete. Prior version at "+ str(prevFile))
            print "        * Backing up prior version of "+fileName+ " file to "+ prevFile
        except Exception,err:
            self.LogError("Could Not Make Copy of Gold File : "+str(err))
            print "        * ERROR: Could not Backup "+fileName
            return -1
        try:  
            fileHandle = open(filePath,"w")
            if (fileName == "slaves"):
                for node in self.nodeInfo.GetTaskTrackers():
                    fileHandle.write(node.hostName + "\n")
            elif (fileName == "masters"):
                for node in self.nodeInfo.GetSecondaryNameNodes():
                    fileHandle.write(node.hostName + "\n")
            self.LogInfo("Created New Gold "+fileName)
            fileHandle.close()
            print "        * New "+fileName+ " file Built"
            return 0
        except Exception,err:
            self.LogError("Could Not Create New Gold "+fileName+":"+str(err))
            print "        * ERROR: Could not build new "+fileName+ "file"
            return -1

    def PushConfig(self,Nodes,Users):
            '''
            Push the modified Hadoop config files to every cluster node.

            Distributes masters, slaves, core-site.xml, hdfs-site.xml and
            mapred-site.xml from the gold dir to installConfigDir on each
            node. Per-file return codes are summed into status, so the
            result is 0 only when every push succeeded.
            '''
            status = 0
            print "    * Push Modified Versions of Hadoop Configuration Files to Entire Cluster"
            self.LogInfo("Push Modified Versions of Hadoop Configuration Files to Entire Cluster")



            status += self.fileops.Push(Nodes,Users,self.configDir+"/gold/hadoop/masters",self.installConfigDir+"/masters")
            status += self.fileops.Push(Nodes,Users,self.configDir+"/gold/hadoop/slaves",self.installConfigDir+"/slaves")
            status += self.fileops.Push(Nodes,Users,self.goldDir+"/"+"core-site.xml",self.installConfigDir+"/core-site.xml")
            status += self.fileops.Push(Nodes,Users,self.goldDir+"/"+"hdfs-site.xml",self.installConfigDir+"/hdfs-site.xml")
            status += self.fileops.Push(Nodes,Users,self.goldDir+"/"+"mapred-site.xml",self.installConfigDir+"/mapred-site.xml")

            if (status ==  0):
                print "    * Completed Push of Modified Versions of Hadoop Configuration Files to Entire Cluster"
                self.LogInfo("Completed Push of Modified Versions of Hadoop Configuration Files to Entire Cluster")
            else:
                print "    * ERROR: Could Not Push Modified Versions of all Hadoop Configuration Files to Entire Cluster"
                self.LogError("Could Not Push Modified Versions of all Hadoop Configuration Files to Entire Cluster")
            return status


    def ModifyXMLConfig(self,fileName):
        '''
        Update the gold copy of one Hadoop *-site.xml file.

        Backs up the current gold copy with a timestamp suffix, then
        rewrites the properties that depend on the cluster layout
        (namenode/jobtracker hosts, data/local dirs built from disk mount
        paths). Returns the status of XML_File.Write().

        NOTE(review): the disk layout is taken from self.Nodes[0] only --
        this assumes every node shares the same disk layout; confirm.
        '''
        dfsDataDir = ""
        mapredLocalDir = ""

        xmlFile = XML_File(self.configDir,fileName)
        prevFile = str(fileName+"."+datetime.datetime.now().strftime("%Y%m%d-%H:%M"))

        #Backup
        print "    * Saving Prior Version of "+fileName+" to "+self.configDir+"/previous/hadoop/"+prevFile
        self.LogInfo("Saving Prior Version of "+fileName+" to "+self.configDir+"/previous/hadoop/"+prevFile)
        self.fileops.LocalPush(self.configDir+"/gold/hadoop/"+fileName,self.configDir+"/previous/hadoop/"+prevFile)


        print "    * Making Modifications to the Gold Copy of "+fileName

        if (fileName == "core-site.xml"):
            xmlFile.Modify("fs.default.name", "hdfs://"+self.nodeInfo.GetNameNode()[0].hostName+"/")
        elif (fileName == "hdfs-site.xml"):

            # One data dir per disk, comma-separated; [:-1] drops the
            # trailing comma.
            for disk in self.Nodes[0].Disks:
                    dfsDataDir += (str(disk.mountPath) + "/hadoop/data,")
            dfsDataDir = dfsDataDir[:-1]

            xmlFile.Modify("dfs.http.address", str(self.nodeInfo.GetNameNode()[0].hostName)+":50070")    
            xmlFile.Modify("dfs.name.dir", str(self.nodeInfo.GetNameNode()[0].Disks[0].mountPath)+"/hadoop/name"   )    
            xmlFile.Modify("dfs.data.dir", str(dfsDataDir) )   
            xmlFile.Modify("fs.checkpoint.dir",  str(self.nodeInfo.GetNameNode()[0].Disks[0].mountPath)+"/hadoop/checkpoint" )   
            xmlFile.Modify("dfs.replication", "3" )
        elif (fileName == "mapred-site.xml"):

            # Same comma-joined pattern as dfs.data.dir above.
            for disk in self.Nodes[0].Disks:
                mapredLocalDir  += (str(disk.mountPath) + "/mapred/local,")
            mapredLocalDir = mapredLocalDir[:-1]
            xmlFile.Modify("mapred.job.tracker", str(self.nodeInfo.GetJobTracker()[0].hostName) +":9001")   
            xmlFile.Modify("mapred.local.dir", str(mapredLocalDir))
            xmlFile.Modify("mapred.system.dir", "/mapred/system"  )    
            xmlFile.Modify("mapred.temp.dir", "/mapred/temp"  )      

        ''' ADD TRY / EXCEPT code here '''

        writeStatus = xmlFile.Write()


        return writeStatus


    def ModifyHadoopEnv(self,hostName):
        '''
        Thread worker: ensure hadoop-env.sh on hostName sources the java
        and pachyderm profile scripts.

        Reads the remote file over SSH and appends the two "source" lines
        unless a PACHYDERM entry already exists. The outcome is reported
        as a (hostName, status, results) tuple on self.returnQueue2
        rather than via a return value.
        '''
        found = False
        filePath = "/usr/lib/gphd/hadoop/conf/hadoop-env.sh"
        appendStr = "source /etc/profile.d/java.sh \nsource /etc/profile.d/pachyderm.sh\n"
        sudo = False
        if ("sudo" in str(self.installSudo)):
            self.LogInfo("Appending File as sudo user")
            sudo=True
        (status,results,fileArray) = self.SSHReadFile(hostName,self.installUser,self.installPW,filePath)

        # Idempotence check: skip the append when the file already sources
        # the pachyderm profile.
        for line in fileArray:
            if ("PACHYDERM" in str(line).upper()):
                self.LogInfo("Source Entries already in hadoop-env.sh")
                found = True
        if (not found):        
            (status,results) = self.SSHAppendFile(hostName,self.installUser,self.installPW,filePath,appendStr,sudo)

        self.returnQueue2.put((hostName,status,results))



#    def ModifyPachyEnv(self,hostName):
#        filePath = "/etc/profile.d/pachyderm.sh"
#        #pachyStr = "export HADOOP_HOME=/usr/lib/gphd/hadoop\nexport PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin"
#        status += self.fileops.Push(Nodes,Users,self.configDir+"/gold/hadoop/masters",self.installConfigDir+"/masters")
#
#        
#        #(status,results) = self.SSHCreateFile(hostName,self.installUser,self.installPW,filePath,pachyStr)
#        self.returnQueue2.put((hostName,status,results))



    def SetupEnv(self,Nodes,Users):
        '''
        Set up environment scripts on every cluster node.

        Pushes gold pachyderm.sh to /etc/profile.d on all nodes, then
        modifies hadoop-env.sh on each node in parallel (one daemon thread
        per node; per-host results collected from returnQueue2).

        Returns 0 on success, -1 if any step failed.
        '''
        envStatus = 0
        threadCount = 0
        Threads = [] 
        self.SetUsers(Users)
        # Drop any stale results from a previous run.
        self.returnQueue2.queue.clear()

        self.LogInfo("Setting Environment Variables on Cluster Nodes")
        print "    * Setting Environment Variables on Cluster Nodes"
        print "        * Creating /etc/profile.d/pachyderm.sh"
        self.LogInfo("Push "+str(self.configDir)+"/gold/hadoop/pachyderm.sh to all nodes at /etc/profile.d/pachyderm.sh")
        status = self.fileops.Push(Nodes,Users,self.configDir+"/gold/hadoop/pachyderm.sh","/etc/profile.d/pachyderm.sh")


#        for node in self.Nodes:
#       
#            Threads.append(threading.Thread(target=self.ModifyPachyEnv,args=(node.hostName,)))
#            Threads[threadCount].daemon=True
#            Threads[threadCount].start()     
#            threadCount = threadCount + 1
#
#        for thread in Threads:
#            thread.join()
#            (hostName,status,results) = self.returnQueue2.get()
        if (status != 0):
          envStatus = -1



        self.returnQueue2.queue.clear()
        threadCount = 0
        Threads = [] 

        print "        * Modifying hadoop-env.sh"
        # One ModifyHadoopEnv worker thread per node; results arrive on
        # returnQueue2 (order not necessarily matching thread join order).
        for node in self.Nodes:

            Threads.append(threading.Thread(target=self.ModifyHadoopEnv,args=(node.hostName,)))
            Threads[threadCount].daemon=True
            Threads[threadCount].start()     
            threadCount = threadCount + 1
        for thread in Threads:
            thread.join()
            (hostName,status,results) = self.returnQueue2.get()
            if (status == 0):
                self.LogInfo("hadoop-env.sh on "+hostName+ " Created")
                print "        * hadoop-env.sh on "+hostName+ " Created"
            else:
                 self.LogError("Could not modify hadoop-env.sh on "+hostName)
                 print "        * ERROR: Could not modify hadoop-env.sh on "+hostName
                 envStatus = -1    



        if (envStatus == 0):
            self.LogInfo("Environment and Path Setup Completed.")

        else:
            self.LogError("Environment and Path Setup Failed ; Exiting")
        return envStatus




    def SetupPermissions(self,mountPoint,Users):
        '''
        Give the HDFS user ownership of mountPoint on every node.

        Runs "chown -R <hdfsUser>:<hdfsUser> <mountPoint>" over SSH on
        each node in parallel; per-host results are gathered from
        self.returnQueue (filled by SSHExec, inherited from Base).

        Returns 0 on success, -1 if any node failed.
        '''
        self.ClearReturnQueue()
        permStatus = 0
        threadCount = 0
        Threads = [] 
        self.LogInfo("Setting Permissions Across Cluster....")
        print "    * Configuring HDFS Data Directory Permisssions"
        self.SetUsers(Users)

        for node in self.Nodes:
            # NEED TO CHANGE TO SUPPORT GROUP
            # NOTE(review): assumes installSudo is empty or ends with a
            # space (e.g. "sudo ") -- otherwise the command is mangled.
            permCMD = str(self.installSudo) + "chown -R "+ str(self.hdfsUser) + ":" + str(self.hdfsUser) + " " + str(mountPoint)
            self.LogInfo(permCMD)
            Threads.append(threading.Thread(target=self.SSHExec,args=(permCMD,node.hostName,self.installUser,self.installPW)))
            Threads[threadCount].daemon=True
            Threads[threadCount].start()     
            threadCount = threadCount + 1

        for thread in Threads:
            thread.join()
            (hostName,username,status,results) = self.returnQueue.get()
            if (status == 0):
                self.LogInfo(str(self.hdfsUser) + " Given Ownership "+ str(hostName) + ":" + str(mountPoint))
                print "        * "+ str(self.hdfsUser) + " Given Ownership "+ str(hostName) + ":" + str(mountPoint)
            else:
                self.LogError(str(hostName)+" : "+str(results))
                print "        * ERROR: Could Not Change owner of "+ str(hostName) + ":" + str(mountPoint)
                permStatus = -1

        if (permStatus == 0):
            self.LogInfo("Permissions Setup Completed.")
        else:
            # NOTE(review): every other failure path uses LogError --
            # confirm LogErrorCon actually exists on Base.
            self.LogErrorCon("Permissions Setup Failed ; Exiting")
        return permStatus




    def __init__(self,binPath,clusterName,projectName):
        '''
        Initialize the installer for the given cluster.

        binPath is the software bundle path from the -g option; it is
        split into self.binDir (directory) and self.binName (filename).

        NOTE(review): binPath[1:] assumes an absolute path starting with
        "/" -- a relative path would silently lose its first character.
        NOTE(review): the projectName parameter is unused here; the class
        attribute projectName ("hadoop") is what the methods read.
        '''
        Base.__init__(self)
        self.binPath = binPath

        ''' Take the entry from the -g entry and parse the components '''

        binComponents = self.binPath[1:].split('/')
        self.binName = binComponents[len(binComponents)-1]  # last path component
        binComponents.remove(self.binName)
        # NOTE(review): remove() deletes the FIRST occurrence, so a
        # directory component equal to the file name would corrupt binDir.
        for comp in (binComponents):
            self.binDir = self.binDir + "/" + comp
        print "    * Starting Hadoop Software Install on Cluster: " + clusterName
        self.LogInfo("Starting Hadoop Software Install on Cluster: " + clusterName)
        self.fileops = FileOps()

#        self.binDir = binDir
#        self.binName = binName