import pandas
import re
import json
import collections
import os
import time

# Absolute directory containing this module; anchors the cache_file directory.
now_path = os.path.abspath(os.path.dirname(__file__))
def getTime():
    """Return the current Unix timestamp (whole seconds) as a string."""
    seconds = int(time.time())
    return str(seconds)
# Cache-lookup helper.
# Returns the cached DataFrame for the given md5 key, or None when no cache exists.
def getPkl(md5):
    """Load the DataFrame pickled under `md5` from the cache, or return None."""
    pkl_file_path = now_path + '/cache_file/' + md5 + '.pkl'

    # Guard clause: no cache file means no cached DataFrame.
    if not os.path.exists(pkl_file_path):
        return None

    return pandas.read_pickle(pkl_file_path)

def generatorToDataFrame(generator_):
    """Drain a generator of DataFrame chunks into one concatenated DataFrame.

    Each chunk is down-cast in place via packedData() before concatenation.
    Returns an empty DataFrame when the generator yields nothing.
    """
    chunks = []

    # A plain for-loop replaces the manual next()/StopIteration pattern;
    # the for statement handles StopIteration itself.
    for df in generator_:
        packedData(df)
        chunks.append(df)

    if not chunks:
        return pandas.DataFrame()

    return pandas.concat(chunks, ignore_index=True)

def readFile(file_, md5):
    """Read an uploaded file into pandas structures, with pickle caching.

    Excel files return a dict of sheet-name -> DataFrame; CSV files return
    a single memory-packed (and, when md5 is given, cached) DataFrame;
    unsupported extensions return False.
    """
    # Extension of the uploaded file.
    suffix = getFileSuffix(file_.filename).lower()

    # Serve from the pickle cache when a cache key is supplied.
    if md5:
        df = getPkl(md5)
        if df is not None:
            return df

    # Exact extension comparison: the previous re.match(r'xlsx|xls', ...)
    # was a prefix match and would also accept e.g. 'xlsb' or 'csvx'.
    if suffix in ('xlsx', 'xls'):
        # Read every sheet of the workbook.
        return pandas.read_excel(file_, sheet_name=None)

    if suffix == 'csv':
        text_file_reader = pandas.read_csv(file_, encoding='gb18030', iterator=True)
        df = concatData(text_file_reader)

        # Only write the cache when a key exists: addPkl would fail on a
        # falsy md5 (string concatenation with None). Still pack the frame
        # so memory behavior matches the cached path.
        if md5:
            addPkl(df, md5)
        else:
            packedData(df)

        return df

    # Unsupported file type.
    return False


def concatData(text_file_reader):
    """Concatenate every chunk from a pandas TextFileReader into one DataFrame.

    Reads in chunks of one million rows until the reader is exhausted.
    Returns an empty DataFrame when the reader yields nothing.
    """
    chunk_size = 1000000
    chunks = []

    while True:
        try:
            chunks.append(text_file_reader.get_chunk(chunk_size))
        except StopIteration:
            # Reader exhausted. The previous bare `except:` also swallowed
            # real read errors (parse failures, I/O errors) silently.
            break

    if not chunks:
        return pandas.DataFrame()

    return pandas.concat(chunks, ignore_index=True)


def packedData(df):
    """Down-cast df's columns in place to reduce memory usage.

    object -> category; int64 -> smallest of int8/int16/int32 that fits;
    float64 -> float32 when values fall in the checked range. Mutates df
    in place and returns None.
    """
    for column in df.columns:
        dtype = df[column].dtype

        if dtype == 'object':
            df[column] = df[column].astype('category')

        elif dtype == 'int64':
            # Renamed from max/min to avoid shadowing the built-ins.
            col_max = df[column].max()
            col_min = df[column].min()

            if col_min >= -128 and col_max <= 127:
                df[column] = df[column].astype('int8')

            elif col_min >= -32768 and col_max <= 32767:
                df[column] = df[column].astype('int16')

            elif col_min >= -2147483648 and col_max <= 2147483647:
                df[column] = df[column].astype('int32')

        elif dtype == 'float64':
            col_max = df[column].max()
            col_min = df[column].min()

            # NOTE(review): this uses int32 bounds, far below float32's real
            # range (~3.4e38); kept as-is to preserve existing behavior —
            # confirm intent before widening.
            if col_min >= -2147483648 and col_max <= 2147483647:
                df[column] = df[column].astype('float32')

def setPkl(df, md5):
    """Pickle df into the cache directory under the given md5 key."""
    target = now_path + '/cache_file/' + md5 + '.pkl'
    df.to_pickle(target)

def addPkl(df, md5):
    """Pack df's memory layout and pickle it into the cache directory.

    Returns the (mutated) df so callers can chain.
    """
    cache_dir = now_path + '/cache_file'

    # Anchor the directory to the module path. The previous relative
    # 'cache_file' argument depended on the process working directory and
    # could diverge from where the pickle below is actually written.
    create_dir_not_exist(cache_dir)

    pkl_file_path = cache_dir + '/' + md5 + '.pkl'

    # Down-cast dtypes first so the cached pickle is already compact.
    packedData(df)

    df.to_pickle(pkl_file_path)

    return df

def removePkl(md5):
    """Delete the cached pickle for md5; missing files are tolerated."""
    cache_dir = now_path + '/cache_file'

    # Anchor to the module directory, consistent with addPkl/getPkl; the
    # old relative 'cache_file' depended on the working directory.
    create_dir_not_exist(cache_dir)

    pkl_file_path = cache_dir + '/' + md5 + '.pkl'
    try:
        os.remove(pkl_file_path)
    except FileNotFoundError:
        # Narrower than the previous IOError: a permission error should
        # surface instead of being reported as "file not found".
        print("似乎没找到文件")

def create_dir_not_exist(path):
    """Create directory `path` if it does not already exist.

    makedirs(exist_ok=True) removes the check-then-create race of the
    previous exists()/mkdir() pair and also creates missing parents.
    """
    os.makedirs(path, exist_ok=True)

# Convert a DataFrame (or a dict of DataFrames) to HTML.
def dataFrameToHtml(data):
    """Render `data` as HTML.

    A single DataFrame yields one HTML table string; a dict (including
    OrderedDict) of DataFrames yields a dict of key -> HTML table.
    Returns None for any other type, matching the original behavior.
    """
    # isinstance replaces the type()-comparison checks; OrderedDict is a
    # dict subclass, so one check covers both. The stray debug print of
    # the input's type was removed.
    if isinstance(data, pandas.DataFrame):
        return data.to_html()

    if isinstance(data, dict):
        return {key: frame.to_html() for key, frame in data.items()}

# Convert a DataFrame (or a dict of DataFrames) to JSON.
def dataFrameToJson(data):
    """Serialize `data` to JSON using pandas' 'split' orient.

    A single DataFrame yields one JSON string. A dict of DataFrames yields
    a dict of key -> JSON string plus a 'sheetNames' entry listing the
    original keys. Returns None for other input types.
    """
    if isinstance(data, pandas.DataFrame):
        return data.to_json(orient='split', date_format='iso')

    # isinstance covers OrderedDict (a dict subclass) as well.
    if isinstance(data, dict):
        json_ = {
            key: frame.to_json(orient='split', date_format='iso')
            for key, frame in data.items()
        }

        # Record sheet order so the client can reconstruct the workbook.
        json_['sheetNames'] = json.dumps(list(data.keys()))

        return json_

# Convert JSON back into DataFrame(s).
def jsonToDataFrame(data):
    """Deserialize JSON into pandas structures.

    A dict of key -> JSON string yields a dict of key -> DataFrame; any
    other input is parsed as a single 'split'-orient DataFrame.
    """
    if isinstance(data, dict):
        dataFrame = {}
        for key in data:
            # NOTE(review): values are parsed with pandas' default orient,
            # while dataFrameToJson emits 'split' — confirm which format
            # callers actually pass here before changing it.
            dataFrame[key] = pandas.read_json(data[key])

        # Bug fix: the original built this dict but never returned it,
        # so the dict branch always produced None.
        return dataFrame

    return pandas.read_json(data, orient='split')

# Return the file extension (text after the last dot).
def getFileSuffix(filename):
    """Return the extension after the last '.', or '' when there is none.

    The original returned the whole filename when no dot was present
    (rfind == -1 made the slice start at index 0), which could cause an
    extensionless name like 'csv' to be mis-classified as a CSV file.
    """
    index = filename.rfind('.')

    if index == -1:
        return ''

    return filename[index + 1:]

# Per-column aggregate statistics (sum, count, variance, ...).
def getComputations(dataFrame):
    """Return {column: stats-dict} for every column of the DataFrame.

    Each stats dict has 'count' (non-null values); numeric columns also
    get 'sum' and 'var'. Fixes the original, which called
    dataFrame.columns() (an Index is not callable, so it raised
    TypeError), overwrote a single 'sum' key each iteration, and
    returned None.
    """
    data = {}
    for column in dataFrame.columns:
        series = dataFrame[column]
        stats = {'count': int(series.count())}

        # sum/var are only meaningful (and safe) on numeric columns.
        if pandas.api.types.is_numeric_dtype(series):
            stats['sum'] = series.sum()
            stats['var'] = series.var()

        data[column] = stats

    return data

def getColumns(dataFrame):
    """Return the DataFrame's column labels as a plain list."""
    return list(dataFrame.columns)

'''
describe: given a DataFrame, return the list of its column dtypes as strings
:param dataFrame: a pandas DataFrame object
'''
def getTypes(dataFrame):
    """Return the dtype of each column, as strings, in column order."""
    return [str(dtype) for dtype in dataFrame.dtypes]

def getSheets(dataFrame):
    """Return the sheet names (top-level keys) of a dict of DataFrames."""
    return [*dataFrame.keys()]
