#!/usr/bin/env python
#-*-encoding:utf-8-*-
'''
Created on 2014-12-12

@author: chenyongbing
'''
import sys,os,datetime,commands,time,re
current_dir = os.path.dirname(__file__)

import logging,os
import logging.handlers
class AutoRun():
    '''Re-run the daily data pipeline (rawdata unpack -> mapreduce -> hadoop).

    Every external action is a shell command executed through
    commands.getstatusoutput; progress and failures go to a rotating log
    file.  Several methods read attributes that are never assigned in
    this class (zmMapreducePath, hadoopPath, rawdata_hdfs_path) -- they
    must be set on the instance before auto_mapreduce_new /
    put_rawdata_to_hdfs are used; TODO confirm where they are normally
    injected.
    '''

    def __init__(self, copy_aliyun=False, raw_source='full_rawdata', rssi_change=False):
        '''
        copy_aliyun -- also scp the per-day aliyun rawdata files after unpacking.
        raw_source  -- 'full_rawdata': one backup tarball per day;
                       'group_rawdata': one tarball per customer group, concatenated.
        rssi_change -- temporarily rewrite rssi config keys in redis while a
                       day is re-run (see change_group_rssi_config).
        '''
        self.init_logger()
        self.copy_aliyun = copy_aliyun
        self.raw_source = raw_source
        self.rssi_change = rssi_change

    def init_logger(self):
        '''Create self.logger with a rotating file handler (10 MB x 5 backups).

        NOTE(review): current_dir + '/' + LOG_FILE produces
        '<script_dir>//data/logs/...'; confirm the intended log location.
        Calling this twice would attach duplicate handlers.
        '''
        LOG_FILE = '/data/logs/auto_run.rerun.log'
        handler = logging.handlers.RotatingFileHandler(
            current_dir + '/' + LOG_FILE, maxBytes=1024 * 1024 * 10, backupCount=5)
        fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
        handler.setFormatter(logging.Formatter(fmt))
        self.logger = logging.getLogger('AutoRun')
        self.logger.addHandler(handler)
        self.logger.setLevel(logging.DEBUG)

    def get_slist_between_st_et(self, st, et):
        u'''Return every date from st to et inclusive as 'YYYY-MM-DD' strings.

        st and et are 'YYYY-MM-DD' strings.  Returns [st] when they are
        equal and [] when st is after et (same as the original
        string-comparison loop, without rebuilding the start date each
        iteration).
        '''
        start = datetime.date(int(st[:4]), int(st[5:7]), int(st[8:]))
        end = datetime.date(int(et[:4]), int(et[5:7]), int(et[8:]))
        span = (end - start).days
        return [(start + datetime.timedelta(i)).strftime('%Y-%m-%d')
                for i in range(span + 1)]

    def run_commands(self, cmd=''):
        '''Run cmd in a shell; on non-zero exit, log the output and terminate.

        Bug fix: the original wrapped the status check in a bare except,
        which also caught the SystemExit it had just raised and logged a
        second, misleading 'check out error.' message before exiting.
        '''
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            self.logger.error(output)
            sys.exit()

    def auto_hadoop(self, day='', softPath='/data/soft', timType='all', groups=''):
        u'''Run the zm-hadoop start script for day, plus the week / month /
        quarter jobs when the FOLLOWING day starts such a period.

        timType selects which periods run: 'all', 'day', 'week', 'month'
        or 'quarter'.
        '''
        self.logger.info('start run hadoop %s' % day)
        # Periodic jobs are keyed on the day after the one being re-run.
        tday_date = datetime.date(int(day[:4]), int(day[5:7]), int(day[8:10])) + datetime.timedelta(1)
        tday = tday_date.strftime('%Y-%m-%d')

        if timType in ['all', 'day']:
            self.logger.info('Start Run Day:%s' % day)
            self.run_commands('sh %s/zm-hadoop/bin/start.sh --day %s %s' % (softPath, day, groups))
        if re.search('-01$', tday) and timType in ['all', 'month']:
            # tday is the first of a month -> run the monthly job
            self.logger.info('Start Run Month:%s' % tday)
            self.run_commands('sh %s/zm-hadoop/bin/start.sh --month %s %s' % (softPath, tday, groups))
        if tday_date.weekday() == 0 and timType in ['all', 'week']:
            # tday is a Monday -> run the weekly job
            self.logger.info('Start Run Week:%s' % tday)
            self.run_commands('sh %s/zm-hadoop/bin/start.sh --week %s %s' % (softPath, tday, groups))
        if (re.search('-01-01', tday) or re.search('-04-01', tday)
                or re.search('-07-01', tday) or re.search('-10-01', tday)) and timType in ['all', 'quarter']:
            # tday starts a quarter -> run the quarterly job
            self.logger.info('Start Run Quarter:%s' % tday)
            self.run_commands('sh %s/zm-hadoop/bin/start.sh --quarter %s %s' % (softPath, tday, groups))
        self.logger.info('End run hadoop %s' % day)

    def auto_mapreduce_new(self, rawPath='', day='', groups=None):
        '''Upload rawPath to HDFS and run the mapreduce start script for day.

        Returns True on success; False when the rawdata dir is empty, the
        success marker is missing from stdout, or any record counter is 0.
        Requires self.zmMapreducePath (not set in this class -- TODO confirm).
        '''
        self.put_rawdata_to_hdfs(rawPath)

        def get_map_reduce_stdout_key(out, key):
            # Pull a 'key=value' counter out of the job stdout; '0' if absent.
            try:
                value = re.search('%s=([^\n]+)' % key, out).group(1)
            except AttributeError:  # re.search returned None
                self.logger.error('Canot Found Key:%s In MapReduce stdout %s' % (key, out))
                return '0'
            return value.strip()

        self.logger.info("start run mapreduce day:%s  rawdata" % day)
        if len(os.listdir(rawPath)) <= 0:
            self.logger.info('rawPath : %s not exists.' % rawPath)
            return False

        runCmd = 'sh %s/bin/start.sh %s' % (self.zmMapreducePath, day)
        if groups != None and len(groups) != 0:
            runCmd += ' %s' % groups
        self.logger.debug(runCmd)

        status, stdout = commands.getstatusoutput(runCmd)
        # NOTE(review): 'job run exit code: 1' is treated as the SUCCESS
        # marker here -- confirm that is really what the runner prints.
        if not re.search('newjob.RawDataJobRunner: job run exit code: 1', stdout):
            self.logger.error('Check Exit Code Not 1 , Mapreduce Run Faild(%s).' % day)
            return False
        self.logger.info(stdout)
        map_input_records = get_map_reduce_stdout_key(stdout, 'Map input records')
        map_output_records = get_map_reduce_stdout_key(stdout, 'Map output records')
        reduce_input_records = get_map_reduce_stdout_key(stdout, 'Reduce input records')
        reduce_output_records = get_map_reduce_stdout_key(stdout, 'Reduce output records')

        # Any zero counter (or missing counter) means the job did no work.
        if '0' in (map_input_records, map_output_records, reduce_input_records, reduce_output_records):
            self.logger.error('Check MapReduce Records Faild , Mapreduce Run Faild(%s).' % day)
            return False
        self.logger.info("end run mapreduce day:%s  rawdata" % day)
        return True

    def auto_mapreduce(self, rawPath='', day='', softPath='/data/soft', groups=None):
        '''Run the zm-mapreduce start script for day; exits when rawPath is empty.'''
        self.logger.info("start run mapreduce day:%s  rawdata" % day)
        if len(os.listdir(rawPath)) <= 0:
            self.logger.info('rawPath : %s not exists.' % rawPath)
            sys.exit()
        runCmd = 'sh %s/zm-mapreduce/bin/start.sh %s' % (softPath, day)
        if groups != None and len(groups) != 0:
            runCmd += ' %s' % groups
        self.logger.debug(runCmd)
        self.run_commands(runCmd)
        self.logger.info("end run mapreduce day:%s  rawdata" % day)

    def auto_mapreduce_old(self, rawfile='', day='', softPath='/data/soft'):
        '''Legacy flow: load a single rawdata file, then run the start script.'''
        self.logger.info("start run mapreduce day:%s  rawdata" % day)
        if not os.path.exists(rawfile):
            self.logger.info('rawfile : %s not exists.' % rawfile)
            sys.exit()
        self.run_commands('sh %s/zm-mapreduce/bin/start_rawdata.sh %s' % (softPath, day))
        self.logger.info("end run mapreduce day:%s  rawdata" % day)
        self.run_commands('sh %s/zm-mapreduce/bin/start.sh %s' % (softPath, day))
        self.logger.info("end run mapreduce day:%s " % day)

    def unzip_rawdata_from_bakfile(self, day, backPath='/mnt/rawdata/', rawPath='/data/rawdata/'):
        '''Unpack the full-day rawdata tarball from backPath into rawPath/day/.

        Loops until the target dir is non-empty; a failing tar exits the
        process via run_commands (e.g. when the tarball is missing).
        '''
        rawFilePath = rawPath + '/' + day + '/'
        rawtarfile = backPath + '/rawdata.%s.txt.tar.gz' % day
        self.run_commands('mkdir -p %s' % rawFilePath)

        while 1:
            self.logger.info("rawdata file %s" % len(os.listdir(rawFilePath)))
            if len(os.listdir(rawFilePath)) > 0:
                break
            if os.path.exists(rawFilePath):
                tarcmd = 'tar -xzvf %s -C %s' % (rawtarfile, rawFilePath)
                self.logger.info(tarcmd)
                self.run_commands(tarcmd)
        if self.copy_aliyun:
            self.logger.info('Start To Copy Aliyun Rawdata')
            self.copy_aliyun_data(day=day, rawPath=rawPath)

    def unzip_rawdata_from_group_bakfile(self, day, backPath='/mnt/rawdata/', rawPath='/data/rawdata/', groupRawPath='/data/zmexport/rawdata/rawdata_group/'):
        '''Unpack per-group rawdata tarballs for day and concatenate them into
        a single rawdata.<day>.txt under rawPath/day/.

        groups is the hard-coded customer group list currently being
        re-run (Country Garden); earlier runs swapped in other customers'
        lists here.  blackInfo maps group id -> days to skip.

        Bug fix: the original body mixed tab and space indentation (a
        syntax error under Python 3, fragile under Python 2) and reported
        progress with bare prints; output now goes through self.logger.
        '''
        # Country Garden (碧桂园)
        groups = [32016036, 32016129, 32016130, 32016131, 32016133,
                  32016134, 32016145, 32016146, 32016147, 32016150]
        blackInfo = {}
        rawFilePath = rawPath + '/' + day + '/'
        try:
            os.mkdir(rawFilePath)
        except OSError:
            pass  # directory already exists
        self.logger.info('start unzip group rawdata. day:%s groups:%s' % (day, groups))
        for group in groups:
            # Skip blacklisted (group, day) combinations.
            if int(group) in blackInfo.keys() and day in blackInfo[int(group)]:
                continue
            remote_grp_raw_file_tar = groupRawPath + day + '/%s.txt.tar.gz' % group
            if not os.path.exists(remote_grp_raw_file_tar):
                self.logger.info('%s not exists.' % remote_grp_raw_file_tar)
                continue
            unzip_cmd = 'tar -xzvf %s -C %s' % (remote_grp_raw_file_tar, rawFilePath)
            self.logger.info(unzip_cmd)
            self.run_commands(unzip_cmd)
        # Concatenate every extracted file, then delete everything that is
        # not the combined rawdata.* file.
        self.run_commands('cat %s/* > %s/rawdata.%s.txt' % (rawFilePath, rawFilePath, day))
        self.run_commands('rm -rf %s/[!rawdata]*' % rawFilePath)
        if self.copy_aliyun:
            self.logger.info('Start To Copy Aliyun Rawdata')
            self.copy_aliyun_data(day=day, rawPath=rawPath)

    def put_rawdata_to_hdfs(self, rawPath=''):
        '''Replace the rawdata currently on HDFS with the contents of rawPath.

        Exits the process when rawPath is empty.  Requires self.hadoopPath
        and self.rawdata_hdfs_path (not set in this class -- TODO confirm).
        '''
        self.delete_rawdata_from_hdfs(rawPath)
        if len(os.listdir(rawPath)) <= 0:
            self.logger.info('rawPath : %s not exists.' % rawPath)
            sys.exit()
        self.logger.info('start to put file to hdfs.')
        putCmd = '%s/hadoop fs -put %s %s' % (self.hadoopPath, rawPath, self.rawdata_hdfs_path)
        self.run_commands(putCmd)

    def delete_rawdata_from_hdfs(self, rawPath=''):
        '''Remove everything under the rawdata HDFS path.

        rawPath is accepted for call-site symmetry but not used.
        '''
        delCmd = '%s/hadoop fs -rm -r %s/*' % (self.hadoopPath, self.rawdata_hdfs_path)
        self.run_commands(delCmd)

    def autoRun(self, days, backPath='/mnt/rawdata/', rawPath='/data/rawdata', rawTarPath=None, sourcePath='/usr/java/data/source', softPath='/data/soft', runType='mapreduce', timeType='all', groups=None):
        '''Re-run the selected pipeline stages for each day in days.

        Work only starts between 05:00 and 22:59; outside that window the
        loop sleeps in 30-minute slices.  runType selects the stages:
        'mapreduce', 'hadoop' or 'all'.  rawTarPath and sourcePath are
        accepted for backwards compatibility but unused.

        Bug fix: rawFilePath was only assigned inside the mapreduce
        branch, so a 'hadoop'-only run crashed with NameError at the
        cleanup step; it is now initialised per day and the cleanup is
        guarded.
        '''
        for day in days:
            # Wait until we are inside the allowed time window.
            while 1:
                now_hour = int(datetime.datetime.today().strftime('%H'))
                if now_hour <= 4 or now_hour >= 23:
                    time.sleep(30 * 60)
                    continue
                break

            if self.rssi_change:
                self.logger.info('change rssi on redis %s' % day)
                self.change_group_rssi_config(day, recover=False)

            rawFilePath = None
            if runType == 'mapreduce' or runType == 'all':
                self.logger.info("start run day:%s  rawdata" % day)
                rawFilePath = rawPath + '/' + day + '/'

                if self.raw_source == 'group_rawdata':
                    self.unzip_rawdata_from_group_bakfile(day, backPath=backPath, rawPath=rawPath, groupRawPath='/Udisk/rawdata_group/')
                else:
                    self.unzip_rawdata_from_bakfile(day, backPath=backPath, rawPath=rawPath)

                mapreduce_ret = self.auto_mapreduce_new(rawPath=rawFilePath, day=day, groups=groups)
                self.logger.info('delete rawdata from hdfs.')
                self.delete_rawdata_from_hdfs(rawPath=rawFilePath)
                if mapreduce_ret == False:
                    sys.exit(1)
            if runType == 'hadoop' or runType == 'all':
                self.auto_hadoop(day=day, softPath=softPath, timType=timeType, groups=groups)
            # Remove the unpacked per-day rawdata directory (mapreduce runs only).
            if rawFilePath and os.path.exists(rawFilePath):
                rmCmd = 'rm -rf %s' % rawFilePath
                self.logger.info(rmCmd)
                self.run_commands(rmCmd)
            if self.rssi_change:
                self.logger.info('recover rssi on redis %s' % day)
                self.change_group_rssi_config(day, recover=True)

    def load_hive_tab(self, startTime=None, endTime=None, hivePath='/data/ZmHadoop/apache-hive-0.13.0-bin/bin/', hiveTab='hivehistoryrole_rerun', rolePath='/usr/java/data/new_role', ago=30):
        '''LOAD each day's role file from rolePath into hive table hiveTab,
        one partition per day.  Exits if a role file is missing.

        endTime defaults to yesterday; startTime defaults to endTime (a
        single day).  ago is accepted but unused -- presumably it was
        meant to derive startTime; TODO confirm before using it.

        Bug fix: the original assigned endTime = startTime when startTime
        was None (wiping endTime back to None), and its tab-indented
        block left days undefined whenever startTime was supplied.
        '''
        if endTime is None:
            endTime = (datetime.date.today() - datetime.timedelta(1)).strftime('%Y-%m-%d')
        if startTime is None:
            startTime = endTime
        days = self.get_slist_between_st_et(startTime, endTime)
        for day in days:
            roleFile = "%s/%s" % (rolePath, day)
            sql = 'LOAD DATA LOCAL INPATH "%s/%s" OVERWRITE INTO TABLE %s PARTITION(d="%s")' % (rolePath, day, hiveTab, day)
            cmd = "%s/hive -e '%s'" % (hivePath, sql)
            self.logger.info(cmd)
            if not os.path.exists(roleFile):
                self.logger.error('role file %s not exists' % roleFile)
                sys.exit()
            self.run_commands(cmd)

    def copy_aliyun_data(self, day=None, ftpHost='zm12', ftpPath='/data/ftp', rawPath='/data/rawdata/'):
        '''scp numbered aliyun rawdata files for day from ftpHost into
        rawPath/day/ until one is reported missing (at most 100 files).

        day defaults to yesterday.  An scp failure with 'No such file or
        directory' ends the sequence; any other failure skips to the next
        index.  (The original's `else: i += 1` was a no-op -- the for
        loop already advances i -- and has been removed.)
        '''
        if day is None:
            day = (datetime.date.today() - datetime.timedelta(1)).strftime('%Y-%m-%d')
        ftpDay = day
        if not os.path.exists(rawPath + '/' + day):
            commands.getstatusoutput('mkdir -p %s/%s' % (rawPath, day))
        for i in range(100):  # hard upper bound on file count
            aliyun_rawfile = '%s/%s/rawdata.%s.%s.txt' % (ftpPath, ftpDay, ftpDay, i)
            local_rawfile = '%s/%s/aliyun.rawdata.%s.%s.txt' % (rawPath, day, day, i)
            cmd = 'scp %s:%s %s' % (ftpHost, aliyun_rawfile, local_rawfile)
            self.logger.info(cmd)
            status, output = commands.getstatusoutput(cmd)
            if status != 0 and re.search('No such file or directory', output):
                break

    def change_group_rssi_config(self, day, host='zm07', port=6379, groups=[], recover=False):
        '''Set (or restore, when recover=True) hard-coded rssi config keys in
        redis -- but only for the days listed in change_days.

        groups is accepted but unused.
        '''
        history_datas = {'cfg_comp_32014391_r_c_m_r': '-60',
                         'cfg_comp_32014391_r_w_m_r': '-70'}
        change_datas = {'cfg_comp_32014391_r_c_m_r': '0',
                        'cfg_comp_32014391_r_w_m_r': '0'}
        change_days = ['2015-05-06']
        if day not in change_days:
            return
        datas = history_datas if recover == True else change_datas
        for key, value in datas.items():
            cmd = 'redis-cli -h %s -p %s set %s %s' % (host, port, key, value)
            self.run_commands(cmd)
            
            
# myAutoRun = AutoRun()
# myAutoRun.copy_aliyun_data()
# sys.exit()
            
#autoRun(days,'/Udisk/rawdata','/usr/java/data/source',runType='hadoop')
if __name__ == '__main__':
    # Command-line driver: re-run the pipeline for [startTime, endTime],
    # both defaulting to yesterday.  (re and datetime are already
    # imported at the top of the file.)
    import argparse
    parser = argparse.ArgumentParser(description='args')
    parser.add_argument('--startTime', metavar=u"", default=None)
    parser.add_argument('--endTime', metavar=u"requests", default=None)
    parser.add_argument('--runType', metavar=u"run type:all,mapreduce,hadoop", default='all')
    parser.add_argument('--timeType', metavar=u"all,day,week,month,quarter", default='all')
    parser.add_argument('--groups', metavar=u'groups', default=None)
    parser.add_argument('--copy_aliyun', action='store_true', help='aliyun rawdata')
    parser.add_argument('--load_hive', metavar='yes,no,only', default='no')
    parser.add_argument('--raw_source', metavar='full_rawdata|group_rawdata', default='full_rawdata')
    # Bug fix: help text was copy-pasted from --copy_aliyun ('aliyun rawdata').
    parser.add_argument('--rssi_change', action='store_true', help='change rssi config in redis while re-running')
    args = parser.parse_args()

    st = args.startTime
    et = args.endTime
    runType = args.runType
    timeType = args.timeType
    groups = args.groups
    copy_aliyun = args.copy_aliyun
    load_hive = args.load_hive
    raw_source = args.raw_source
    rssi_change = args.rssi_change
    # Default both ends of the range to yesterday.
    yesterday = (datetime.date.today() - datetime.timedelta(1)).strftime('%Y-%m-%d')
    if st is None:
        st = yesterday
    if et is None:
        et = yesterday
    myAutoRun = AutoRun(copy_aliyun=copy_aliyun, rssi_change=rssi_change, raw_source=raw_source)
    days = myAutoRun.get_slist_between_st_et(st, et)

    # Fixed deployment paths for the rerun environment.
    rawTarPath = None
    rawPath = '/data/rawdata/'
    softPath = '/data/rerun/'
    sourcePath = '/usr/java/data/source'
    backPath = '/Udisk/rawdata/'
    hiveTab = 'hivehistoryrole_rerun'
    rolePath = '/data/java/data/new_role'
    hivePath = '/data/ZmHadoop/apache-hive-0.13.0-bin/bin/'
    if load_hive in ['yes', 'only']:
        myAutoRun.load_hive_tab(startTime=st, endTime=et, hivePath=hivePath, hiveTab=hiveTab, rolePath=rolePath, ago=180)

    # 'only' means: load hive and stop; anything else also runs the pipeline.
    if load_hive != 'only':
        myAutoRun.autoRun(days, backPath=backPath, rawPath=rawPath, rawTarPath=rawTarPath, sourcePath=sourcePath, softPath=softPath, runType=runType, timeType=timeType, groups=groups)
