import math
import os

# NOTE: Python 2 script (print statements throughout); run with a Python 2 interpreter.
print "simple script collections"
print "updated by Wei by 2012/11/19"

# IDs of the cluster machines (hadoop01..hadoop17) currently usable for downloading.
clusterMachineOKList = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
# Current downloading round; embedded into every round-specific folder/file name.
ROUND_NUMBER = 22

#step0
def makeTheCorrespondingFolders():
    print "step0"
    print "make the corresponding folders."
    for OKCluterNumber in clusterMachineOKList:
        directory = "/data/weijiang/BingDataSetDownloading/urls-related/" + "round%02d" % ROUND_NUMBER + "/hadoop" + "%02d" % OKCluterNumber + "webPageDownloading" + "-urls" + "-round%02d" % ROUND_NUMBER
        print directory
        os.mkdir(directory)

#step1
def moveTheCorrespondingDataFilesAndCopyTheInitBlackListIntoTheLocalReadyFolder():
    print "step1"
    print "move the corresponding data files and copy the init black list into the local ready folder."
    for counter in range(0,17):
        cmd_mv = "mv /data/weijiang/BingDataSetDownloading/urls-related/" + "round%02d" % ROUND_NUMBER + "/urls-part" + "%02d" % counter + "-OutOf" + str(len(clusterMachineOKList)) + "-hadoop%02d" % clusterMachineOKList[counter]+ "-round%02d" % ROUND_NUMBER + " /data/weijiang/BingDataSetDownloading/urls-related/" + "round%02d" % ROUND_NUMBER +"/hadoop" + "%02d" % clusterMachineOKList[counter] + "webPageDownloading" + "-urls" + "-round%02d" % ROUND_NUMBER + "/"
        cmd_cp = "cp /data/weijiang/BingDataSetDownloading/programs/url-dynamic-update-black-list-patterns" + " /data/weijiang/BingDataSetDownloading/urls-related/" + "round%02d" % ROUND_NUMBER + "/hadoop" + "%02d" % clusterMachineOKList[counter] + "webPageDownloading" + "-urls" + "-round%02d" % ROUND_NUMBER + "/"
        print cmd_mv
        print cmd_cp
        os.system( cmd_mv )
        os.system( cmd_cp )

#step2
def copyTheCorrespondingProgramsIntoTheLocalReadyFolder():
    print "step2"
    print "copy the corresponding programs(python modules) into the local ready folder."
    for counter in range(0,17):
        cmd_cp = "cp /data/weijiang/BingDataSetDownloading/programs/retriever-multi-wei-20120616-release.py" + " /data/weijiang/BingDataSetDownloading/urls-related/" + "round%02d" % ROUND_NUMBER + "/hadoop" + "%02d" % clusterMachineOKList[counter] + "webPageDownloading" + "-urls" + "-round%02d" % ROUND_NUMBER + "/"
        cmd_cp2 = "cp /data/weijiang/BingDataSetDownloading/programs/robotexclusionrulesparser.py" + " /data/weijiang/BingDataSetDownloading/urls-related/" + "round%02d" % ROUND_NUMBER + "/hadoop" + "%02d" % clusterMachineOKList[counter] + "webPageDownloading" + "-urls" + "-round%02d" % ROUND_NUMBER + "/"
        print cmd_cp
        print cmd_cp2
        os.system( cmd_cp )
        os.system( cmd_cp2 )

#step3
def copyAllTheReadyWebPageDownloadingFoldersIntoTheClusterMachines():
    print "step3"
    print "copy all the ready web page downloading folders into the cluster machines."
    #cause the hadoop01 has some problem currently, so the pangolin will take the job of hadoop01, in this case, the folder doesn't need to upload to the hadoop01.
    for counter in range(0,17):
        cmd_cp = "rcp -r" + " /data/weijiang/BingDataSetDownloading/urls-related/" + "round%02d" % ROUND_NUMBER + "/hadoop" + "%02d" % clusterMachineOKList[counter] + "webPageDownloading" + "-urls" + "-round%02d" % ROUND_NUMBER +" akkking@hadoop" + "%02d" % clusterMachineOKList[counter] + ":~"
        print cmd_cp
        os.system( cmd_cp )





def copyAllTheIndexRelatedFilesIntoCluster():
    print "copy All The Index Related Files Into Cluster"
    print "0"
    cmd_login = "ssh akkking@hadoop01"
    os.system( cmd_login )
    print "1"
    cmd_mkdir = "mkdir distributedQueryProcessingSlave"
    os.system( cmd_mkdir )
    print "2"

def getStatistics():
    #step6
    # get the statistics from each cluster machine and generate the overall statistics in final.
    targetURLFileName = "z01-url"
    Number_OF_MACHINES_USED = 13
    
    totalNumberOfURLsInFile = 0
    totalNumberOfPagesToProcess = 0
    numberOfPagesSuccedToProcess = 0
    numberOfPagesFailedToProcess = 0
    totalTimeUsedToProcess = 0
    
    for counter in range(1,14):
        #especially for the incident happended in hadoop06 and hadoop09
        if counter !=6 and counter != 9:
            fileName = "/data/weijiang/clusterPageDownloading/z01-url-related/z01-url-statistics-collection/statisticsInfoFile" + "-hadoop" + "%02d" % counter + "-part" + "%02d" % counter + "OutOf13" + "-" + targetURLFileName
            fileHandler = open(fileName,"r")
            
            line0 = fileHandler.readline()
            line1 = fileHandler.readline()
            line2 = fileHandler.readline()
            line3 = fileHandler.readline()
            line4 = fileHandler.readline()
            line5 = fileHandler.readline()
            lastLine = fileHandler.readlines()[-1]
            
            #print "lastLine:",lastLine
            #print line0
            #print line1
            totalNumberOfURLsInFile += int( line2.split(":")[1] )
            totalNumberOfPagesToProcess += int( line3.split(":")[1] )
            numberOfPagesSuccedToProcess += int( line4.split(":")[1] )
            numberOfPagesFailedToProcess += int( line5.split(":")[1] )
            totalTimeUsedToProcess += float( lastLine.split(":")[1].split("\n")[0] )
    
    print "process file name:",targetURLFileName
    print "total Number Of URLs In File:",totalNumberOfURLsInFile
    print "total Number Of Pages To Process:",totalNumberOfPagesToProcess
    print "number Of Pages Succeed To Process:",numberOfPagesSuccedToProcess
    print "number Of Pages Failed To Process:",numberOfPagesFailedToProcess
    print "total Time Used To Process in secs:",totalTimeUsedToProcess
    print "total Time Used To Process in hours:",totalTimeUsedToProcess/3600
    print "total Time Used To Process in days:",totalTimeUsedToProcess/3600/24
    print "Percentage of total job finished: 2/7"
    print "Number_OF_MACHINES_USED:",Number_OF_MACHINES_USED

def usefulScripts():
    #No step at all, just some useful script.
    print "produce the filesListedForDirCluewebDataCompressedFileMapping.txt"
    currentLine = ""
    lineElementsList = []
    
    outputFileName = "/data1/team/weijiang/machine-learning-project-related/auxFiles/filesListedForDirCluewebDataCompressedFileMapping.txt"
    
    outputFileHandler = open(outputFileName,"w")
    outputLinesList = []
    
    for dirname, dirnames, filenames in os.walk('/data1/team/weijiang/machine-learning-project-related/auxFiles/cluewebDataCompressedFileMapping'):
        #for subdirname in dirnames:
        #    print os.path.join(dirname, subdirname)
        
        for filename in filenames:
            currentLine = filename
            outputLinesList.append(currentLine)
    
    outputLinesList.sort(cmp=None, key=None, reverse=False)
    for currentElement in outputLinesList:
        print currentElement
        outputFileHandler.write(currentElement + "\n")
        
    outputFileHandler.close()
    '''
    
    '''
    print "produce the filesListedForDirCluewebDataDocsMapping.txt"
    currentLine = ""
    lineElementsList = []
    
    outputFileName = "/data1/team/weijiang/machine-learning-project-related/auxFiles/filesListedForDirCluewebDataDocsMapping.txt"
    
    outputFileHandler = open(outputFileName,"w")
    outputLinesList = []
    
    for dirname, dirnames, filenames in os.walk('/data1/team/weijiang/machine-learning-project-related/auxFiles/cluewebDataDocsMapping'):
        #for subdirname in dirnames:
        #    print os.path.join(dirname, subdirname)
        
        for filename in filenames:
            currentLine = filename
            outputLinesList.append(currentLine)
    
    outputLinesList.sort(cmp=None, key=None, reverse=False)
    for currentElement in outputLinesList:
        print currentElement
        outputFileHandler.write(currentElement + "\n")
        
    outputFileHandler.close()

def usefulScripts1():
    """Create the distributedQueryProcessingSlave folder for hadoop01..hadoop17
    under the local cluster-simulation directory."""
    for i in range(1, 18):
        path = "/data/weijiang/ClusterDistrubtedSearchEngineSimulation/" + "hadoop" + "%02d" % i + "/" + "distributedQueryProcessingSlave/"
        # os.makedirs also creates a missing hadoopNN parent directory; the
        # original os.mkdir failed unless the parent already existed, and
        # crashed on re-runs when the folder was already present.
        if not os.path.isdir(path):
            os.makedirs(path)

def usefulScripts2():
    # NOTE(review): this function is a no-op -- nav_cmd is built but never
    # executed, and even os.system("cd ...") would only change directory in a
    # child shell, not in this process.  Kept byte-identical; confirm the
    # intended behavior before fixing or removing.
    for i in range(1,2):
        nav_cmd = "cd /data/weijiang/ClusterDistrubtedSearchEngineSimulation/" + "hadoop" + "%02d" % i + "/" + "distributedQueryProcessingSlave/"

# --- Script driver: uncomment exactly the step to execute for this run. ---
# Steps are meant to run in order (step0 -> step1 -> step2 -> step3), one
# invocation per step; only step3 is active here.

# step0:
# makeTheCorrespondingFolders()

# step1:
# moveTheCorrespondingDataFilesAndCopyTheInitBlackListIntoTheLocalReadyFolder()

# step2:
# copyTheCorrespondingProgramsIntoTheLocalReadyFolder()

# step3:
# updated 2012/11/19 by Wei 
copyAllTheReadyWebPageDownloadingFoldersIntoTheClusterMachines()



#copyAllTheIndexRelatedFilesIntoCluster()

#usefulScripts1()
#usefulScripts2()

print "end"