#coding:utf-8
import json
import math
import os
import sys
import time
import requests
from hdfs import InsecureClient
from pyspark.sql import SparkSession
from pyspark.sql.functions import *

def get_namenode_status(hostname):
    """Probe a NameNode's JMX endpoint and report its HA state.

    Args:
        hostname: NameNode host to query (web UI port 50070 assumed).

    Returns:
        'active' / 'standby' for the two HA states, 'other' for any
        unexpected state string, 'error' when the endpoint is
        unreachable or returns malformed JSON.
    """
    jmxport = 50070
    url = "http://{0}:{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus".format(hostname, jmxport)
    try:
        # Bound the request so a hung NameNode cannot stall the caller forever
        # (the original call had no timeout).
        response = requests.get(url, timeout=10)
        jsonString = response.json()
        nnstatus = jsonString.get('beans', [{}])[0].get('State', '')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any request/parse failure maps to 'error'.
        return 'error'
    if nnstatus == 'active':
        return 'active'
    elif nnstatus == 'standby':
        return 'standby'
    else:
        return 'other'



def getHDFSClient(nnList, hdfsOperator):
    """Return an InsecureClient bound to the currently active NameNode.

    Args:
        nnList: iterable of candidate NameNode hostnames.
        hdfsOperator: HDFS user name for the client.

    Returns:
        A working hdfs.InsecureClient.

    Raises:
        RuntimeError: when no candidate is active and reachable.
    """
    errorLog = []
    for nn in nnList:
        nnstatus = get_namenode_status(nn)
        print("namenode status:" + str(nnstatus))
        if 'active' != nnstatus:
            continue

        url = 'http://{}:50070'.format(nn)
        try:
            client = InsecureClient(url, user=hdfsOperator, timeout=100)
            # Probe a well-known path so a misbehaving NN fails fast here
            # instead of on the first real operation.
            client.status('/tmp')
            return client
        except Exception as e:
            # BUG FIX: Exception.message does not exist on Python 3;
            # str(e) is portable across 2 and 3.
            errorLog.append("NameNode:" + str(nn) + "; Exception:" + str(e))
            print(errorLog)
    raise RuntimeError("No available namenode can be found!  " + ''.join(errorLog))


def deleteFiles(client, hiveTempPath):
    """Best-effort delete of every completed file directly under hiveTempPath.

    Hidden files ('.'-prefixed) and in-flight '.tmp' files are preserved.
    Individual deletion failures are printed and skipped.

    Args:
        client: hdfs client exposing list()/delete().
        hiveTempPath: directory whose completed files are removed.
    """
    for entry in client.list(hiveTempPath):
        name = str(entry)
        # Skip hidden and still-being-written files.
        if entry[0] == '.' or name.endswith('.tmp'):
            continue
        target = hiveTempPath + '/' + name
        try:
            print(target)
            client.delete(target)
        except Exception as e:
            # BUG FIX: e.message raises AttributeError on Python 3.
            print(str(e))

def moveFiles(client, hiveTempPath, flumeDataPath, needProtocol, limitNum):
    """Stage up to limitNum completed flume files into the hive temp dir.

    Walks flumeDataPath/<date>/<protocol>/ and renames files for the
    requested protocol into hiveTempPath. Skips hidden and '.tmp' files.
    Does nothing when the staging dir already holds more than limitNum files.

    Args:
        client: hdfs client exposing walk()/rename().
        hiveTempPath: destination staging directory.
        flumeDataPath: flume landing root, laid out as <date>/<protocol>/files.
        needProtocol: only files under this protocol directory are moved.
        limitNum: maximum number of files to move in one call.
    """
    snappyFileNum = getFileNum(client, hiveTempPath)
    if snappyFileNum > limitNum:
        return
    moveFileCount = 0
    directPath = hiveTempPath + '/'
    for basePath, date_dir, outterFiles in client.walk(flumeDataPath, depth=0):
        for datestr in date_dir:
            for protocolParentPath, protocols, files1 in client.walk(basePath + '/' + datestr, depth=0):
                for protocol in protocols:
                    if protocol != needProtocol:
                        continue
                    sourcePath = protocolParentPath + '/' + needProtocol + '/'
                    for snappyParentPath, protocolDir, snappyFiles in client.walk(sourcePath, depth=0):
                        for snappyFile in snappyFiles:
                            name = str(snappyFile)
                            if snappyFile[0] == '.' or name.endswith('.tmp'):
                                continue
                            # BUG FIX: was `moveFileCount > limitNum`, which
                            # moved limitNum + 1 files before returning.
                            if moveFileCount >= limitNum:
                                return
                            try:
                                client.rename(sourcePath + name, directPath + name)
                                moveFileCount += 1
                            except Exception as e:
                                # BUG FIX: e.message is Python-2-only.
                                print(str(e))

# return folder size (GB)
def getHDFSFolderSize(client, sourcePath):
    """Return the total size, in GB, of completed files under sourcePath.

    Hidden files ('.'-prefixed) and in-flight '.tmp' files are excluded.
    """
    total_bytes = 0
    for dirPath, subDirs, fileNames in client.walk(sourcePath, depth=0):
        prefix = dirPath + '/'
        for fileName in fileNames:
            name = str(fileName)
            if fileName[0] == '.' or name.endswith('.tmp'):
                continue
            total_bytes += client.status(prefix + name)['length']
    # Convert bytes -> GB with float division.
    return total_bytes / 1024.0 / 1024.0 / 1024.0

# return number of partitions
def getRepartitionByFileSize(hdfsclient, floderPath, stdValue):
    """Choose a partition count so each partition holds roughly stdValue GB.

    Args:
        hdfsclient: hdfs client exposing status()/walk().
        floderPath: folder whose size drives the partition count.
        stdValue: target GB per partition; values below 0.001 fall back to 1.

    Returns:
        int partition count, at least 1.
    """
    # Presumably a fail-fast existence check — the status result is unused.
    hdfsclient.status(floderPath)
    folderSizeGB = getHDFSFolderSize(hdfsclient, floderPath)
    if stdValue < 0.001:
        stdValue = 1
    if folderSizeGB <= stdValue:
        return 1
    return int(folderSizeGB / stdValue) + 1

def getFileNum(hdfsclient, floderPath):
    """Count completed files under floderPath, recursively.

    Hidden files ('.'-prefixed) and in-flight '.tmp' files are ignored.
    """
    filecount = 0
    for dirPath, subDirs, names in hdfsclient.walk(floderPath, depth=0):
        filecount += sum(
            1 for name in names
            if not (name[0] == '.' or str(name).endswith('.tmp'))
        )
    return filecount

# return number of partitions
def getRepartitionByFileNum(hdfsclient, floderPath, stdValue):
    """Choose a partition count from the number of files under floderPath.

    Args:
        hdfsclient: hdfs client exposing walk().
        floderPath: folder whose file count drives the partition count.
        stdValue: target files per partition; values below 1 fall back to 1.

    Returns:
        int partition count, at least 1.
    """
    filecount = getFileNum(hdfsclient, floderPath)
    if stdValue < 1:
        stdValue = 1
    print("Total file number of folders: " + str(filecount))
    # BUG FIX: force true division — under Python 2, int/int floors first,
    # making math.ceil a no-op and under-counting by up to one partition.
    repartition = int(math.ceil(float(filecount) / stdValue)) + 1
    if repartition < 1:
        repartition = 1
    return repartition

def readFile(filename):
    """Return the entire contents of filename as text.

    Uses a context manager so the handle is closed even if read() raises
    (the original leaked the handle on error), and 4-space indentation to
    match the rest of the file (the original used tabs).
    """
    with open(filename, "r") as f:
        return f.read()

def backupData(client, hiveTempPath, backupDataPath, needProtocol):
    """Move everything under hiveTempPath into a dated backup directory.

    The destination is backupDataPath/<needProtocol>/<YYYY-MM-DD>, created
    if missing. When a rename fails (e.g. the target name already exists),
    the stale target is deleted and the rename retried once.

    Args:
        client: hdfs client exposing status()/makedirs()/walk()/rename()/delete().
        hiveTempPath: source directory to drain.
        backupDataPath: backup root.
        needProtocol: protocol subdirectory name under the backup root.
    """
    time_now = time.strftime("%Y-%m-%d")
    backupDataPath = backupDataPath + '/' + needProtocol + '/' + time_now
    fileExist = client.status(backupDataPath, strict=False)
    if fileExist is None:
        # NOTE(review): hdfs makedirs usually expects an octal permission
        # string (e.g. '777'); the int 777 is kept as-is — confirm intent.
        client.makedirs(backupDataPath, permission=777)
    errorLog = []
    for root, snappyFileDir, snappyFiles in client.walk(hiveTempPath, depth=0):
        for snappyFile in snappyFiles:
            name = str(snappyFile)
            src = hiveTempPath + '/' + name
            dst = backupDataPath + '/' + name
            try:
                client.rename(src, dst)
            except Exception as e:
                # BUG FIX: e.message raises AttributeError on Python 3.
                errorLog.append("backup data:" + dst + "; Exception:" + str(e))
                print(errorLog)
                # Assume a name collision: drop the stale target and retry once.
                client.delete(dst)
                client.rename(src, dst)

def _main():
    # Placeholder entry point; the real work is driven by callers that
    # import the helpers above.
    print("hello")


if __name__ == "__main__":
    _main()
