import warnings

warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
import sys

sys.path.append(".")
import os

DATA_ROOT_PATH = "/home/wujing/Datasets/azure/azurefunction-blob/"
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
import time


def extract_timestamp_info(file_path, columns):
    """Load a trace file and derive human-readable timestamp columns.

    Parameters
    ----------
    file_path : str
        Path to a ``.csv`` or parquet trace file containing an epoch-millisecond
        ``Timestamp`` column.
    columns : list[str]
        Columns to load from the file (must include ``Timestamp``).

    Returns
    -------
    pandas.DataFrame
        The loaded frame with two added columns: ``Timestamp_str``
        (``YYYY-MM-DD HH:MM:SS`` in UTC) and ``Timestamp_day`` (the date part).
    """
    if ".csv" in file_path:
        # `usecols` is the read_csv keyword for column selection;
        # the original passed `columns=`, which read_csv rejects.
        data = pd.read_csv(file_path, usecols=columns)
    else:
        data = pd.read_parquet(file_path, columns=columns)
    # Timestamps are epoch milliseconds; render them as UTC strings.
    stamps = [time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ms / 1000.0))
              for ms in data['Timestamp'].values]
    data['Timestamp_day'] = [s.split(" ")[0] for s in stamps]
    data['Timestamp_str'] = stamps
    # The original mutated a local frame and returned nothing, making the
    # result unreachable; return it so callers can use the augmented frame.
    return data


def sort_data_by_app(file_name='azurefunctions-blob-accesses-2020.csv'):
    """Split the raw access trace into one CSV per application.

    Reads ``DATA_ROOT_PATH/file_name`` and writes
    ``DATA_ROOT_PATH/by_app/blob-accesses-<AnonAppName>.csv`` for each app,
    most-frequent apps first.
    """
    data = pd.read_csv(DATA_ROOT_PATH + file_name)
    target_path = os.path.join(DATA_ROOT_PATH, "by_app")
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(target_path, exist_ok=True)
    # value_counts() orders apps by invocation count, descending.
    for app in data['AnonAppName'].value_counts().index:
        subset = data[data['AnonAppName'] == app]
        # os.path.join fixes the doubled separator the original produced
        # ("by_app//blob-accesses-...").
        subset.to_csv(os.path.join(target_path, "blob-accesses-" + app + ".csv"))


def sort_data_by_day(file_name='azurefunction-blob-accesses-2020-core-info.parquet'):
    """Split the core-info trace into one parquet file per calendar day.

    Reads ``DATA_ROOT_PATH/file_name`` (must contain a ``Timestamp_day``
    column) and writes ``DATA_ROOT_PATH/by_day/blob-accesses-<day>.parquet``
    for each day, busiest days first.
    """
    data = pd.read_parquet(DATA_ROOT_PATH + file_name)
    target_path = os.path.join(DATA_ROOT_PATH, "by_day")
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(target_path, exist_ok=True)
    # value_counts() orders days by row count, descending.
    for day in data['Timestamp_day'].value_counts().index:
        subset = data[data['Timestamp_day'] == day]
        # os.path.join fixes the doubled separator the original produced
        # ("by_day//blob-accesses-...").
        subset.to_parquet(os.path.join(target_path, "blob-accesses-" + day + ".parquet"))


def get_sample_blob(date="2020-12-01"):
    """Draw a sample of read accesses for one day, maximizing the size spread.

    Loads the per-day parquet file for *date*, keeps rows that are reads with a
    positive ``BlobBytes``, and draws 5 random samples of up to 9999 rows.  The
    row with the largest blob is prepended to every sample so the maximum is
    always present.  The sample with the largest max/min ``BlobBytes`` ratio is
    written to ``./redis-samples/blob-accesses-<date>-samples.csv``.
    """
    data = pd.read_parquet(DATA_ROOT_PATH + "by_day/blob-accesses-" + date + ".parquet")
    read_info = data[(data['Read'] == True) & (data['BlobBytes'] > 0)]
    max_idx = read_info['BlobBytes'].argmax()  # positional index of the largest blob
    # sample() raises if n exceeds the population; clamp for small days.
    sample_size = min(9999, len(read_info))
    gap, samples = None, None
    for _ in range(5):
        blobs = read_info.sample(n=sample_size)
        # Prepend the largest-blob row: place it at index -1, shift all
        # indices up by one, then sort so it lands in row 0.
        blobs.loc[-1] = read_info.iloc[max_idx].values.tolist()
        blobs.index = blobs.index + 1
        blobs = blobs.sort_index()
        ratio = blobs['BlobBytes'].max() / blobs['BlobBytes'].min()
        if gap is None or ratio > gap:
            gap = ratio
            samples = blobs
    out_dir = "./redis-samples"
    # to_csv does not create missing directories; make sure the target exists.
    os.makedirs(out_dir, exist_ok=True)
    samples.to_csv(out_dir + "/blob-accesses-" + date + "-samples.csv")


def get_invocation_by_app():
    """Count the rows (invocations) of every per-app CSV and persist a summary.

    Scans ``DATA_ROOT_PATH/by_app/`` and writes a two-column table
    (``app_file``, ``invocation``) to
    ``DATA_ROOT_PATH/invocation_stats_by_app.csv``.
    """
    source_dir = DATA_ROOT_PATH + "by_app/"
    app_files = os.listdir(source_dir)
    invocations = [len(pd.read_csv(source_dir + name)) for name in app_files]
    summary = pd.DataFrame({'app_file': app_files, 'invocation': invocations})
    summary.to_csv(DATA_ROOT_PATH + "invocation_stats_by_app.csv")


def save_file(src_file, target_file):
    """Copy a CSV from *src_file* to *target_file*, treating column 0 as the index."""
    pd.read_csv(src_file, index_col=0).to_csv(target_file)


def get_popular_app():
    """Copy the traces of the 100 most-invoked apps into a ranked directory.

    Reads ``invocation_stats_by_app.csv``, takes the top 100 apps by
    ``invocation``, and writes each one's trace to
    ``DATA_ROOT_PATH/by_popularity/blob-accesses-top=<rank>.csv``
    (rank 1 = most invoked), re-saving via :func:`save_file` in parallel.
    """
    stats = DATA_ROOT_PATH + "invocation_stats_by_app.csv"
    apps = pd.read_csv(stats).sort_values('invocation', ascending=False).head(100)
    parent_path = DATA_ROOT_PATH + "by_app/"
    target_path = DATA_ROOT_PATH + "by_popularity/"
    os.makedirs(target_path, exist_ok=True)

    # One pool for all files: the original constructed (and tore down) a fresh
    # 40-worker pool inside the loop, submitting a single task to each, so no
    # two files were ever processed concurrently.
    with ProcessPoolExecutor(max_workers=40) as pool:
        futures = {}
        for rank, src_file in enumerate(apps['app_file'].tolist(), start=1):
            target_file = "blob-accesses-top=" + str(rank) + ".csv"
            fut = pool.submit(save_file, parent_path + src_file, target_path + target_file)
            futures[fut] = target_file
        # submit() never raises worker errors; they surface on the future.
        # The original's try/except around submit() could not catch them.
        for fut, target_file in futures.items():
            exc = fut.exception()
            if exc is not None:
                print(target_file, exc)


# sort data by timestamp
# evaluate the size of each day such that it does not overwhelm the vm
# write the redis based on the read info
# access the data while recording the time costs
# sort_data_by_app()
if __name__ == "__main__":
    # The guard is required because get_popular_app spawns worker processes:
    # under the 'spawn' start method each worker re-imports this module, and
    # an unguarded call would recursively re-submit the whole job.
    try:
        get_popular_app()
    except Exception as e:
        print(e)
