#!/usr/bin/env python
#-*-encoding:utf-8-*-
'''
Created on 2014年12月12日

@author: chenyongbing
'''
import sys,os,datetime,commands,time,re
current_dir = os.path.dirname(__file__)

import logging,os
import logging.handlers
class AutoRun():
    '''Driver for the daily rawdata processing pipeline.

    Fetches/unpacks raw data (from a tar backup, per-group tarballs, scp from
    a remote host, or an Aliyun FTP mirror), pushes it into HDFS, runs the
    zm-mapreduce and zm-hadoop jobs, and can load role files into a Hive
    table.  Every external action is performed by shelling out through
    ``commands.getstatusoutput`` (Python 2 only); with ``testing=True``
    commands are logged instead of executed.
    '''
    def __init__(self,copy_aliyun=False,testing=False,zmHadoopPath='',zmMapreducePath='',raw_source=False,rawdata_hdfs_path='',rssi_change=False,groupRawPath='/Udisk/rawdata_group/',hadoopPath='/data/ZmHadoop/hadoop-2.3/bin/',LOG_FILE='/data/logs/auto_run.log'):
        # BUGFIX: default LOG_FILE was the misspelled '/data/logs/auto_reun.log';
        # it now matches init_logger's default.
        self.init_logger(LOG_FILE=LOG_FILE)
        self.copy_aliyun = copy_aliyun              # also fetch Aliyun rawdata via scp
        self.testing = testing                      # dry-run mode: only log commands
        self.zmHadoopPath = zmHadoopPath            # zm-hadoop install root
        self.zmMapreducePath = zmMapreducePath      # zm-mapreduce install root
        self.rssi_change = rssi_change              # stored but not used in this class
        self.rawdata_hdfs_path = rawdata_hdfs_path  # HDFS target dir for rawdata
        self.groupRawPath = groupRawPath            # dir holding per-group tarballs
        self.raw_source = raw_source                # 'group_rawdata' selects grouped tars
        self.hadoopPath = hadoopPath                # bin/ dir containing the `hadoop` CLI

    def init_logger(self,LOG_FILE='/data/logs/auto_run.log'):
        '''Create self.logger with a 10 MB rotating file handler plus console output.'''
        handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes = 1024*1024*10, backupCount = 5)
        fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(levelname)s - %(message)s'
        formatter = logging.Formatter(fmt)
        handler.setFormatter(formatter)
        console = logging.StreamHandler()
        console.setFormatter(formatter)
        self.logger = logging.getLogger('AutoRun')
        # BUGFIX: the named logger is a process-wide singleton, so
        # re-instantiating AutoRun used to attach extra handlers and
        # duplicate every log line.  Attach handlers only once.
        if not self.logger.handlers:
            self.logger.addHandler(handler)
            self.logger.addHandler(console)
        self.logger.setLevel(logging.DEBUG)

    def get_slist_between_st_et(self,st,et):
        u'''Return every date from st to et inclusive as 'YYYY-MM-DD' strings.

        st and et are 'YYYY-MM-DD' strings; an empty list is returned when
        st > et.
        '''
        dlist = []
        cur = datetime.date(int(st[:4]),int(st[5:7]),int(st[8:]))
        nday = cur.strftime('%Y-%m-%d')
        # String comparison is safe for zero-padded ISO dates.
        while nday <= et:
            dlist.append(nday)
            cur += datetime.timedelta(1)
            nday = cur.strftime('%Y-%m-%d')
        return dlist

    def run_commands(self,cmd=''):
        '''Execute a shell command, exiting the process on failure.

        Failures whose output contains "No such file or directory" are
        tolerated (best-effort cleanup commands rely on this).  Returns the
        (status, output) tuple; in testing mode the command is only logged
        and (0, '') is returned.
        '''
        if self.testing:
            self.logger.info(cmd)
            return 0,''
        cout = commands.getstatusoutput(cmd)
        try:
            if cout[0]!=0 and not re.search('No such file or directory',cout[1]):
                self.logger.error(cout[1])
                sys.exit()
        except SystemExit:
            # BUGFIX: the previous bare `except:` also caught the SystemExit
            # raised just above and logged a misleading 'check out error.'.
            raise
        except Exception:
            self.logger.error( 'check out error.')
            sys.exit()
        return cout

    def auto_hadoop(self,day='',timType='all',check=False,groups=''):
        u'''Run the zm-hadoop aggregation jobs for ``day``.

        timType: one of all, day, week, month, quarter.  The week/month/
        quarter jobs fire when the day *after* ``day`` opens the new period;
        ``check=True`` suppresses the month and quarter jobs.
        '''
        self.logger.info( 'start run hadoop %s'%day)
        # tday is the day after `day`; period boundaries are detected on it.
        tday_date = datetime.date(int(day[:4]),int(day[5:7]),int(day[8:10])) +datetime.timedelta(1)
        tday = tday_date.strftime('%Y-%m-%d')

        if timType in ['all','day']:
            self.logger.info('Start Run Day:%s'%day)
            self.run_commands('sh %s/bin/start.sh --day %s %s'%(self.zmHadoopPath,day,groups))
        if re.search('-01$',tday) and timType in ['all','month'] and check==False:
            # tday is the 1st of a month -> run the monthly aggregation
            self.logger.info('Start Run Month:%s'%tday)
            self.run_commands('sh %s/bin/start.sh --month %s'%(self.zmHadoopPath,tday))

        if tday_date.weekday()==0 and timType in ['all','week']:
            # tday is a Monday -> run the weekly aggregation
            self.logger.info('Start Run Week:%s'%tday)
            self.run_commands('sh %s/bin/start.sh --week %s'%(self.zmHadoopPath,tday))

        if (re.search('-01-01',tday) or re.search('-04-01',tday) or re.search('-07-01',tday) or re.search('-10-01',tday)) and timType in ['all','quarter'] and check==False:
            # tday opens a calendar quarter -> run the quarterly aggregation
            self.logger.info('Start Run Quarter:%s'%day)
            self.run_commands('sh %s/bin/start.sh --quarter %s'%(self.zmHadoopPath,tday))
        self.logger.info( 'End run hadoop %s'%day)

    def auto_mapreduce(self,rawPath='',day='',groups=None):
        '''Run the zm-mapreduce start.sh for ``day``; exits if rawPath is empty.

        ``groups`` (optional, non-empty string) is appended to the command.
        '''
        self.logger.info( "start run mapreduce day:%s  rawdata"%day)
        # NOTE: this tests for an *empty* directory; os.listdir raises
        # OSError if rawPath itself does not exist.
        if len(os.listdir(rawPath))<=0:
            self.logger.info( 'rawPath : %s not exists.'%rawPath)
            sys.exit()

        runCmd ='sh %s/bin/start.sh %s'%(self.zmMapreducePath,day)
        if groups!=None and len(groups)!=0:
            runCmd += ' %s'%groups
        self.logger.debug(runCmd)

        self.run_commands(runCmd)
        self.logger.info( "end run mapreduce day:%s  rawdata"%day)

    def put_rawdata_to_hdfs(self,rawPath=''):
        '''Replace the HDFS rawdata dir contents with the files under rawPath.

        Exits the process when rawPath is an empty directory.
        '''
        self.delete_rawdata_from_hdfs(rawPath)
        if len(os.listdir(rawPath))<=0:
            self.logger.info( 'rawPath : %s not exists.'%rawPath)
            sys.exit()
        self.logger.info('start to put file to hdfs.')
        putCmd = '%s/hadoop fs -put %s %s'%(self.hadoopPath,rawPath,self.rawdata_hdfs_path)
        self.run_commands(putCmd)

    def delete_rawdata_from_hdfs(self,rawPath=''):
        '''Remove everything under self.rawdata_hdfs_path on HDFS.

        ``rawPath`` is accepted for call-site symmetry but is not used;
        missing-path errors are tolerated by run_commands.
        '''
        delCmd = '%s/hadoop fs -rm -r %s/*'%(self.hadoopPath,self.rawdata_hdfs_path)
        self.run_commands(delCmd)

    def auto_mapreduce_new(self,rawPath='',day='',groups=None):
        '''Push rawdata to HDFS, run zm-mapreduce and sanity-check its stdout.

        Returns True on success; False when rawPath is empty, the job's exit
        marker is missing, or any Map/Reduce record counter is zero.
        '''
        self.put_rawdata_to_hdfs(rawPath)
        def get_map_reduce_stdout_key(out,key):
            # Extract "key=value" from the job stdout; returns '0' when the
            # key is absent so the zero-record check below fails the run.
            try:
                value = re.search('%s=([^\n]+)'%key,out).group(1)
            except AttributeError:
                # re.search returned None -- key not present in stdout.
                self.logger.error('Canot Found Key:%s In MapReduce stdout %s'%(key,out))
                return '0'
            return value.strip()

        self.logger.info( "start run mapreduce day:%s  rawdata"%day)
        if len(os.listdir(rawPath))<=0:
            self.logger.info( 'rawPath : %s not exists.'%rawPath)
            return False

        runCmd ='sh %s/bin/start.sh %s'%(self.zmMapreducePath,day)
        if groups!=None and len(groups)!=0:
            runCmd += ' %s'%groups
        self.logger.debug(runCmd)

        status , stdout = commands.getstatusoutput(runCmd)
        self.logger.info(stdout)
        # Per the job's own convention, the literal line
        # "newjob.RawDataJobRunner: job run exit code: 1" in stdout marks a
        # successful run; its absence is treated as failure.
        if not re.search('newjob.RawDataJobRunner: job run exit code: 1',stdout):
            self.logger.error('Check Exit Code Not 1 , Mapreduce Run ERROR(%s).'%day)
            return False

        map_input_records = get_map_reduce_stdout_key(stdout,'Map input records')
        map_output_records = get_map_reduce_stdout_key(stdout,'Map output records')
        reduce_input_records = get_map_reduce_stdout_key(stdout,'Reduce input records')
        reduce_output_records = get_map_reduce_stdout_key(stdout,'Reduce output records')

        # Any zero counter means the job produced nothing useful.
        if map_input_records=='0' or map_output_records=='0' or reduce_input_records=='0' or reduce_output_records=='0':
            self.logger.error('Check MapReduce Records Faild , Mapreduce Run Faild(%s).'%day)
            return False
        self.logger.info( "end run mapreduce day:%s  rawdata"%day)

        return True

    def auto_mapreduce_old(self,rawfile='',day=''):
        '''Legacy flow: run start_rawdata.sh then start.sh for ``day``.

        Exits the process when ``rawfile`` does not exist.
        '''
        self.logger.info( "start run mapreduce day:%s  rawdata"%day)
        if not  os.path.exists(rawfile):
            self.logger.info( 'rawfile : %s not exists.'%rawfile)
            sys.exit()
        self.run_commands('sh %s/bin/start_rawdata.sh %s'%(self.zmMapreducePath,day))
        self.logger.info( "end run mapreduce day:%s  rawdata"%day)
        self.run_commands('sh %s/bin/start.sh %s'%(self.zmMapreducePath,day))
        self.logger.info( "end run mapreduce day:%s "%day)

    def unzip_rawdata_from_bakfile(self,day,backPath='/mnt/rawdata/',rawPath='/data/rawdata/'):
        '''Extract the day's rawdata tarball from backPath into rawPath/<day>/.

        Retries until the target directory is non-empty.
        NOTE(review): if the tarball never appears this loop spins forever
        without sleeping -- confirm whether a timeout/backoff is wanted.
        '''
        rawFilePath = rawPath + '/' + day +'/'
        rawtarfile = backPath + '/rawdata.%s.txt.tar.gz'%day
        self.run_commands('mkdir -p %s'%rawFilePath)

        while 1:
            self.logger.info("rawdata file %s"%len(os.listdir(rawFilePath)))
            if len(os.listdir(rawFilePath))>0:break
            if  os.path.exists(rawFilePath):
                tarcmd = 'tar -xzvf %s -C %s'%(rawtarfile,rawFilePath)
                self.logger.info(tarcmd)
                self.run_commands(tarcmd)

    def unzip_rawdata_from_group_bakfile(self,day,backPath='/mnt/rawdata/',rawPath='/data/rawdata/',groupRawPath='/data/zmexport/rawdata/rawdata_group/',shop_groups=[],blackInfo={}):
        '''Extract per-group rawdata tarballs and merge them into one file.

        Groups blacklisted for ``day`` in ``blackInfo`` (group id -> list of
        days) and missing tarballs are skipped.  After extraction all group
        files are concatenated into rawdata.<day>.txt and the parts removed.
        '''
        rawFilePath = rawPath + '/' + day +'/'

        try:
            os.mkdir(rawFilePath)
        except OSError:
            # Directory already exists -- fine.
            pass
        self.logger.info('start unzip group rawdata %s.'%day)
        # BUGFIX: autoRunByDay passes shop_groups=None, which used to raise
        # TypeError here; treat None like an empty list.
        for shop_group in (shop_groups or []):
            if int(shop_group) in blackInfo.keys() and day in blackInfo[int(shop_group)]:continue
            remote_grp_raw_file_tar = groupRawPath+day+'/%s.txt.tar.gz'%shop_group
            if not os.path.exists(remote_grp_raw_file_tar):
                self.logger.info( remote_grp_raw_file_tar+' not exists.')
                continue
            unzip_cmd = 'tar -xzvf %s -C %s'%(remote_grp_raw_file_tar,rawFilePath)
            self.logger.info( unzip_cmd)
            self.run_commands(unzip_cmd)
        self.logger.info( 'cat %s/* > %s/rawdata.%s.txt'%(rawFilePath,rawFilePath,day))

        self.run_commands('cat %s/* > %s/rawdata.%s.txt'%(rawFilePath,rawFilePath,day))
        # Delete the per-group parts, keeping only the merged rawdata.* file.
        self.logger.info( 'rm -rf %s/[!rawdata]*'%rawFilePath)
        self.run_commands('rm -rf %s/[!rawdata]*'%rawFilePath)

    def scp_rawdata_from_remote(self,day,rawhost,rawFilePath):
        '''scp the day's rawdata from ``rawhost`` until rawFilePath is non-empty.

        NOTE(review): retries back-to-back with no sleep if the remote files
        are not there yet -- confirm whether a backoff is wanted.
        '''
        rawpath = '/usr/java/data/rawdata/netty/%s/'%(day)
        scpcmd = 'scp %s:%s/* %s'%(rawhost,rawpath,rawFilePath)
        while 1:
            if len(os.listdir(rawFilePath))>0:break
            self.run_commands(scpcmd)

    def autoRunByDay(self,day=None,backPath='/mnt/rawdata/',rawPath='/data/rawdata',rawTarPath=None,sourcePath='/usr/java/data/source',runType='mapreduce',timeType='all',groups=None,rawhost='zm02',check=False,blackInfo={}):
        '''Run the full pipeline for one day (defaults to yesterday).

        runType: 'mapreduce', 'hadoop' or 'all'.  The mapreduce phase obtains
        rawdata (grouped tars, backup tar, or scp), optionally merges Aliyun
        data, runs the MapReduce job and cleans up; the hadoop phase runs the
        aggregations.  ``rawTarPath`` and ``sourcePath`` are accepted for
        call-site compatibility but unused here.  ``blackInfo`` is a mutable
        default but is only read.  Exits with status 1 if MapReduce fails.
        '''
        if day==None:
            day = (datetime.date.today()-datetime.timedelta(1)).strftime('%Y-%m-%d')
        yesterday = (datetime.date.today()-datetime.timedelta(1)).strftime('%Y-%m-%d')
        # BUGFIX: rawFilePath is needed by the cleanup at the bottom even when
        # runType=='hadoop'; it used to be defined only inside the mapreduce
        # branch, raising NameError for hadoop-only runs.
        rawFilePath = rawPath + '/' + day +'/'
        if runType=='mapreduce' or runType=='all':
            self.logger.info( "start run day:%s  rawdata"%day)
            rawfile = rawPath+'/rawdata.%s.txt'%day
            rawtarfile = backPath + '/rawdata.%s.txt.tar.gz'%day
            self.run_commands('mkdir -p %s'%rawFilePath)
            if self.raw_source=='group_rawdata':
                self.unzip_rawdata_from_group_bakfile(day, backPath=backPath, rawPath=rawPath, groupRawPath=self.groupRawPath, shop_groups=None, blackInfo=blackInfo)
            elif  os.path.exists(rawtarfile) and day!=yesterday:
                # Historical day with a backup tarball available.
                self.unzip_rawdata_from_bakfile(day, backPath, rawPath)
            else:
                # Fresh day (or no backup): pull directly from the raw host.
                self.scp_rawdata_from_remote(day, rawhost, rawFilePath)
            if self.copy_aliyun:
                self.logger.info('Start To Copy Aliyun Rawdata')
                self.copy_aliyun_data(day=day, rawPath=rawPath)

            mapreduce_ret = self.auto_mapreduce_new(rawPath=rawFilePath,day=day,groups=groups)
            # Clean up HDFS and the local staging dir regardless of outcome.
            self.logger.info('delete rawdata from hdfs.')
            self.delete_rawdata_from_hdfs(rawPath=rawFilePath)
            if os.path.exists(rawFilePath):
                rmCmd = 'rm -rf %s'%rawFilePath
                self.logger.info(rmCmd)
                self.run_commands(rmCmd)
            if mapreduce_ret == False:
                sys.exit(1)

        if runType=='hadoop' or runType=='all':
            self.auto_hadoop(day=day,timType=timeType,check=check,groups=groups)
        if os.path.exists(rawFilePath):
            rmCmd = 'rm -rf %s'%rawFilePath
            self.logger.info(rmCmd)
            self.run_commands(rmCmd)

    def load_hive_tab(self,startTime=None,endTime=None,hivePath='/data/ZmHadoop/apache-hive-0.13.0-bin/bin/',hiveTab='hivehistoryrole_rerun',rolePath='/usr/java/data/new_role',ago=30):
        '''LOAD each day's role file in [startTime, endTime] into ``hiveTab``.

        Both bounds default to yesterday.  Exits if a role file is missing.
        ``ago`` is accepted for backward compatibility but no longer used.
        '''
        if startTime ==None:
            startTime = (datetime.date.today()-datetime.timedelta(1)).strftime('%Y-%m-%d')
        if endTime == None:
            endTime = (datetime.date.today()-datetime.timedelta(1)).strftime('%Y-%m-%d')
        days = self.get_slist_between_st_et(startTime, endTime)
        for day in days:
            roleFile = "%s/%s"%(rolePath,day)
            # One Hive partition per day, overwritten on reload.
            sql = 'LOAD DATA LOCAL INPATH "%s/%s" OVERWRITE INTO TABLE %s PARTITION(d="%s")'%(rolePath,day,hiveTab,day)
            cmd = "%s/hive -e '%s'"%(hivePath,sql)
            self.logger.info(cmd)
            if not os.path.exists(roleFile):
                self.logger.error('role file %s not exists'%roleFile)
                sys.exit()
            self.run_commands(cmd)

    def copy_aliyun_data(self,day=None,ftpHost='zm12',ftpPath = '/data/ftp',rawPath='/data/rawdata/'):
        '''scp the day's numbered Aliyun rawdata parts (0..99) from ftpHost.

        Stops at the first part missing on the remote side; other scp
        failures just move on to the next index.
        '''
        if day ==None:
            day = (datetime.date.today()- datetime.timedelta(1)).strftime('%Y-%m-%d')
        ftpDay = day
        if not os.path.exists(rawPath+'/'+day):
            self.run_commands('mkdir -p %s/%s'%(rawPath,day))
        for i in range(100):
            aliyun_rawfile = '%s/%s/rawdata.%s.%s.txt'%(ftpPath,ftpDay,ftpDay,i)
            local_rawfile = '%s/%s/aliyun.rawdata.%s.%s.txt'%(rawPath,day,day,i)

            cmd = 'scp %s:%s %s'%(ftpHost,aliyun_rawfile,local_rawfile)
            if self.testing:
                self.logger.info(cmd)
                status,output = 0,''
            else:
                status,output = commands.getstatusoutput(cmd)
            # Dead `continue`/`i+=1` branches removed: the `for` controls i.
            if status != 0 and re.search('No such file or directory',output):
                break
            
        
# myAutoRun = AutoRun()
# myAutoRun.copy_aliyun_data()
# sys.exit()
            
#autoRun(days,'/Udisk/rawdata','/usr/java/data/source',runType='hadoop')
if __name__ == '__main__':
    import argparse,re,datetime
    # Command-line driver: runs the pipeline for every day in [startTime, endTime].
    parser = argparse.ArgumentParser(description='args')
    parser.add_argument('--startTime',metavar=u"",default=None)
    parser.add_argument('--endTime',metavar=u"requests",default=None)
    parser.add_argument('--runType',metavar=u"run type:all,mapreduce,hadoop",default='all')
    parser.add_argument('--timeType',metavar=u"all,day,week,month,quarter",default='all')
    parser.add_argument('--groups',metavar=u'company groups',default=None)
    parser.add_argument('--copy_aliyun',action='store_true',help='aliyun rawdata')
    parser.add_argument('--load_hive',metavar='yes,no,only',default='no')
    parser.add_argument('--check',action='store_true',help='check')
    # BUGFIX: metavar contained a stray backslash ('full_rawdata\group_rawdata');
    # '|' is the intended separator in the help text.
    parser.add_argument('--raw_source',metavar='full_rawdata|group_rawdata',default='full_rawdata')
    parser.add_argument('--testing',action='store_true',help='testing')
    args = parser.parse_args()

    st = args.startTime
    et = args.endTime
    runType = args.runType
    timeType = args.timeType
    comp_groups = args.groups
    copy_aliyun = args.copy_aliyun
    load_hive = args.load_hive
    check = args.check
    testing = args.testing
    raw_source = args.raw_source
    # Default window: today when only checking, otherwise yesterday.
    if st==None:
        if check==True:
            st = datetime.date.today().strftime('%Y-%m-%d')
        else:
            st = (datetime.date.today() - datetime.timedelta(1)).strftime('%Y-%m-%d')
    if et==None:
        if check == True:
            et  = datetime.date.today().strftime('%Y-%m-%d')
        else:
            et = (datetime.date.today() - datetime.timedelta(1)).strftime('%Y-%m-%d')

    rawTarPath = None

    # runName selects a sandboxed ('prerun') set of paths and table names;
    # set it to None to use the production paths below.
    runName = 'prerun'
    if runName!=None:
        LOG_FILE = '/data/logs/auto_run.%s.log'%runName
        rawPath = '/data/rawdatatmp_%s/'%runName
        zmHadoopPath = '/data/%s/zm-hadoop/'%runName
        zmMapreducePath = '/data/%s/zm-mapreduce/'%runName
        hiveTab = 'hivehistoryrole_%s'%runName
        rolePath = '/data/%s/data/new_role'%runName
        rawdata_hdfs_path = '/rawdata_%s/'%runName
    else:
        LOG_FILE = '/data/logs/auto_run.log'
        rawPath = '/data/rawdatatmp/'
        zmHadoopPath = '/usr/java/zm-hadoop/'
        zmMapreducePath = '/usr/java/zm-mapreduce/'
        hiveTab = 'newhivehistoryrole'
        rolePath = '/usr/java/data/new_role'
        rawdata_hdfs_path = '/rawdata/'
    groupRawPath = '/Udisk/rawdata_group/'
    sourcePath = '/usr/java/data/source'
    backPath = '/Udisk/rawdata/'
    hivePath = '/data/ZmHadoop/apache-hive-0.13.0-bin/bin/'
    hadoopPath = '/data/ZmHadoop/hadoop-2.3/bin/'

    myAutoRun = AutoRun(copy_aliyun=copy_aliyun,testing=testing,raw_source=raw_source,zmHadoopPath=zmHadoopPath,zmMapreducePath=zmMapreducePath,rawdata_hdfs_path=rawdata_hdfs_path,groupRawPath=groupRawPath,hadoopPath=hadoopPath,LOG_FILE=LOG_FILE)
    days = myAutoRun.get_slist_between_st_et(st,et)
    if load_hive in ['yes','only']:
        # BUGFIX: load_hive_tab has no 'day' parameter -- the old call
        # load_hive_tab(day=st, ...) raised TypeError whenever --load_hive
        # was requested.  Pass the requested window bounds instead.
        myAutoRun.load_hive_tab(startTime=st,endTime=et,hivePath=hivePath, hiveTab=hiveTab, rolePath=rolePath,ago=180)

    if load_hive != 'only':
        for day in days:
            myAutoRun.autoRunByDay(day,backPath=backPath,rawPath=rawPath,rawTarPath=rawTarPath,sourcePath=sourcePath,runType=runType,timeType=timeType,groups=comp_groups,check=check)
