import os
import numpy as np
import sys
sys.path.append(".")
import hyper_param as param
import pandas as pd
import redis
import time
import matplotlib.pyplot as plt
DATA_ROOT_PATH ="/home/wujing/Datasets/azure/azurefunction-blob/"
def store_blob_by_index():
    """Load the sampled blob-access trace and store one zero-filled value per
    blob in Redis, keyed "blob-<index>", sized to the blob's BlobBytes.

    Uses a single pipeline so all SETs go out in one round trip.
    """
    blob_info = pd.read_csv("./redis-samples/blob-accesses-2020-12-01-samples.csv",index_col=0)
    r = redis.Redis(host=param.REDIS_HOST, port=param.REDIS_PORT, decode_responses=False,
                    password=param.REDIS_PWD)
    blob_index_list = blob_info.index.tolist()
    pipe = r.pipeline()
    for idx in range(len(blob_info)):
        # Bind the key name FIRST so the except handler can always report it;
        # in the original the size lookup came first, and a failure there left
        # blob_index unbound -> NameError inside the handler.
        blob_index = "blob-" + str(blob_index_list[idx])
        try:
            blobs_size = blob_info['BlobBytes'].iloc[idx]
            # bytes(n) allocates n zero bytes: a placeholder payload of the
            # real blob's size (content doesn't matter for latency tests).
            blob = bytes(int(blobs_size))
            pipe.set(blob_index, blob)
        except Exception as e:
            print(blob_index, e)
    pipe.execute()

def measure_blob_read():
    """Measure per-blob GET latency against Redis.

    For every blob key ("blob-<index>") in the sampled trace, time five
    consecutive GETs and write the results to
    ./redis-samples/blob-accesses-2020-12-01-latency.csv
    with columns: index, 0..4 (seconds per attempt).
    """
    blob_info = pd.read_csv("./redis-samples/blob-accesses-2020-12-01-samples.csv",index_col=0)
    r = redis.Redis(host=param.REDIS_HOST, port=param.REDIS_PORT, decode_responses=False,
                    password=param.REDIS_PWD)
    print(blob_info.head())
    blob_index_list = blob_info.index.tolist()
    result = {"index":[],"0":[],"1":[],"2":[],"3":[],"4":[]}
    for idx in range(len(blob_info)):
        blob_index = "blob-" + str(blob_index_list[idx])
        try:
            # Collect all five timings locally first, then commit them to
            # `result` together. The original appended to result['index']
            # before measuring, so a mid-loop exception left the column lists
            # with unequal lengths and pd.DataFrame(result) crashed at the end,
            # losing the entire run.
            timings = []
            for _ in range(5):
                # perf_counter is the monotonic high-resolution clock intended
                # for interval measurement; time.time() can jump (NTP, etc.).
                start = time.perf_counter()
                r.get(blob_index)
                timings.append(time.perf_counter() - start)
            result['index'].append(blob_index)
            for k, t in enumerate(timings):
                result[str(k)].append(t)
        except Exception as e:
            print(blob_index, e)
    pd.DataFrame(result).to_csv("./redis-samples/blob-accesses-2020-12-01-latency.csv")

def get_blob_stats():
    """Load the latency CSV, report how many rows it has, and attach an 'avg'
    column holding each blob's mean latency across the five attempts.

    NOTE(review): the result is only kept in memory — nothing is written back
    or returned; presumably used interactively or extended later.
    """
    latency = pd.read_csv("./redis-samples/blob-accesses-2020-12-01-latency.csv")
    print(len(latency))
    # Columns 0-1 are the unnamed CSV index and the blob key; everything from
    # column 2 onward is a latency sample, averaged per row.
    latency['avg'] = latency.iloc[:, 2:].mean(axis=1)

def get_popular_worksets():
    """For each per-app trace file under by_popularity/, compute the ratio of
    the largest to the smallest positive blob size, and write the ratios to
    ./max_vs_min_blobs_by_app.xlsx.

    Files with no positive BlobBytes are skipped (and reported): in the
    original, max()/min() on an empty filtered series silently produced NaN,
    polluting the spreadsheet.
    """
    parent_path = DATA_ROOT_PATH + "by_popularity/"
    gap = []
    for file in os.listdir(parent_path):
        data = pd.read_csv(parent_path + file)
        # Zero/negative sizes would make the ratio meaningless (or divide by
        # zero), so only positive blob sizes participate.
        blobs = data[data['BlobBytes'] > 0]['BlobBytes']
        if blobs.empty:
            print("skipping (no positive BlobBytes):", file)
            continue
        gap.append(blobs.max() / blobs.min())
    pd.DataFrame({"max/min": gap}).to_excel("./max_vs_min_blobs_by_app.xlsx")




# Script entry point: run the selected step. Guarded so importing this module
# for its functions doesn't trigger a run, and failures print the full
# traceback instead of just the exception message (the original `print(e)`
# discarded the stack, making errors undiagnosable).
if __name__ == "__main__":
    import traceback
    try:
        # store_blob_by_index()
        # measure_blob_read()
        # get_blob_stats()
        get_popular_worksets()
    except Exception:
        traceback.print_exc()