#!/usr/bin/python
#coding=utf-8

import os 
import time
import urllib
import datetime
import threading

def customlog(data):
  """Parse one raw kafka log line into the t_user_play_quality row format.

  data -- a raw log line of '&'-separated, URL-encoded key=value pairs.

  Returns a two-element list:
    [0] the tab-separated row (28 fields, trailing '\n'),
    [1] the first 10 chars of 'rtime' (the 'YYYYMMDDHH' hour bucket
        used by the caller to rotate output files).
  Returns None when `data` is not a string (the AttributeError is
  swallowed, preserving the original best-effort contract).
  A line with no 'rtime' raises TypeError (the int default 0 cannot be
  sliced); the caller's except treats that as a bad line.
  """
  try:
        # unquote lives in urllib on Python 2 and urllib.parse on Python 3.
        try:
                from urllib import unquote
        except ImportError:
                from urllib.parse import unquote

        # 'k=v' pairs -> dict.  Pairs without '=' are skipped, and a value
        # containing a second '=' keeps only the part before it, matching
        # the original split()[1] behaviour.
        parsed = {}
        for pair in data.split('&'):
                kv = pair.split('=')
                if len(kv) > 1:
                        parsed[kv[0]] = kv[1]

        # Fixed column order of the warehouse row; missing keys default to 0.
        field_order = ('ip', 'rtime', 'logid', 'logtype', 'logreason',
                       'datetime', 'uid', 'mos', 'mosv', 'mfo', 'mfov',
                       'webtype', 'webinfo', 'cellid', 'lac', 'loc', 'pro',
                       'mtype', 'cv', 'channelid', 'startid', 'playid',
                       'catecode', 'playlistid', 'vid', 'site', 'content',
                       'memo')
        dic = dict((k, parsed.get(k, 0)) for k in field_order)

        def unq2(v):
                # These values arrive double URL-encoded; decode twice.
                # The int default 0 passes through untouched (fix: a missing
                # webinfo/content/memo previously raised inside unquote and
                # silently dropped the entire line).
                return unquote(unquote(v)) if isinstance(v, str) else v

        # 'ip' may be a comma-separated chain; keep only the first address.
        dic['ip'] = str(dic['ip']).split(',')[0]
        dic['webinfo'] = unq2(dic['webinfo'])
        dic['content'] = unq2(dic['content'])
        dic['memo'] = unq2(dic['memo'])

        row = '\t'.join(str(dic[k]) for k in field_order) + '\n'
        # [formatted row, 'YYYYMMDDHH' hour bucket]
        return [row, dic['rtime'][:10]]

  except AttributeError:
        # data was not a string -- treat as an unparseable line.
        pass


def putdata(filepath,date):
        """Upload one finished hourly log file to HDFS and register its
        Hive partition.

        filepath -- local path of the spooled log file.
        date     -- 'YYYYMMDDHH' string; split into day/hour partition keys.

        Pure side effects via os.system; returns None.
        """
        day, hour = date[:8], date[8:]
        hadoop = "/data1/opt/hadoop_1.0/bin/hadoop"
        hdfs_dir = "/user/nlp/warehouse/raw/t_user_play_quality/dt=%s/hour=%s" % (day, hour)
        # Create the partition directory, then copy the file into it.
        os.system("%s fs -mkdir %s" % (hadoop, hdfs_dir))
        os.system("%s fs -put %s  %s" % (hadoop, filepath, hdfs_dir))
        # Tell Hive about the new partition so it becomes queryable.
        hive_cmd = ('hive -e "use raw; alter table  t_user_play_quality  '
                    "add partition (dt=%s,hour=%s) location '%s';\"") % (day, hour, hdfs_dir)
        os.system(hive_cmd)

def returndata():
        """Tail the kafka console consumer, clean each log line with
        customlog(), and append it to an hourly local spool file.  On an
        hour rollover the finished file is shipped to HDFS/Hive by
        putdata() in a background thread.  Runs until the consumer pipe
        closes; returns None.
        """
        count = 0
        # Current hour bucket (YYYYMMDDHH); also names the spool file.
        # (Fix: this literal was previously duplicated in the open() path.)
        time2 = 2014121908
        f = open('/data3/hadoop_data/%s' % time2, 'a+')
        # Stream raw log lines from kafka.
        pipe = os.popen("/data1/opt/kafka/kafka-0.7.0/bin/kafka-run-class.sh kafka.consumer.ConsoleConsumer --zookeeper=10.16.19.76:2181,10.16.19.77:2181,10.16.19.78:2181 --group=nlp_kafka --topic=video_upload")
        for raw in pipe:
                try:
                        cleaned = customlog(raw)   # [formatted row, 'YYYYMMDDHH']
                        log = cleaned[0]           # TypeError here when customlog gave up
                        time1 = int(cleaned[1])
                        # Only check for an hour rollover every ~10000 lines
                        # to keep the per-line cost low.
                        if count > 10000:
                                if time1 <= time2:
                                        f.write(log)
                                        count = 0
                                else:
                                        # Hour changed: rotate to a new spool
                                        # file, then upload the finished one in
                                        # a thread -- the hadoop put is slow
                                        # and must not stall the cleaning loop.
                                        f.close()
                                        f = open("/data3/hadoop_data/%s" % time1, 'a+')
                                        f.write(log)
                                        threading.Thread(
                                                target=putdata,
                                                args=("/data3/hadoop_data/%s" % time2, str(time2)),
                                        ).start()
                                        count = 0
                                        time2 = time1
                        else:
                                f.write(log)
                                count += 1
                except Exception:
                        # Malformed line (customlog returned None, or rtime was
                        # not numeric) -- drop it and keep consuming.  (Fix:
                        # was a bare except, which also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        pass



if __name__ == '__main__':
        # Entry point: run the kafka -> local spool -> HDFS pipeline.
        returndata()