#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import time
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from collect_results import circle_to_latencyInUs, get_numOps_from_perfLog, NUM_ITER

from os.path import join
import fileinput


def get_model_in_ldb_pct(filepath):
    """Report how many fmodel files have a matching ldb (SSTable) file.

    `filepath` is a directory-listing file with one filename per line,
    e.g. "000001.ldb" or "1.fmodel".  An fmodel is considered alive when
    an ldb whose 6-digit zero-padded stem equals the fmodel's number is
    also listed.

    Returns a 4-tuple:
        (#fmodels, #alive fmodels, #ldbs (sstables),
         fraction of ldbs that have an alive fmodel)
    The fraction is 0.0 when the listing contains no ldb files (the
    previous version raised ZeroDivisionError in that case).
    """
    ldbs = []
    fmodels = []
    with open(filepath) as f:
        for raw in f:
            # Strip only the newline; robust to a missing final newline
            # (the old `line[:-1]` chopped a real character in that case).
            name = raw.rstrip('\n')
            if '.' not in name:
                continue
            ext = name.split('.')[1]
            if ext == 'ldb':
                ldbs.append(name)
            elif ext == 'fmodel':
                fmodels.append(name)

    ldb_set = set(ldbs)  # O(1) membership tests below
    num_alive_fmodels = 0
    for fmodel in fmodels:
        f_num = fmodel.split('.')[0]
        # ldb names are zero-padded to 6 digits, fmodel names are not.
        if f_num.zfill(6) + '.ldb' in ldb_set:
            num_alive_fmodels += 1

    pct = float(num_alive_fmodels) / float(len(ldbs)) if ldbs else 0.0
    return len(fmodels), num_alive_fmodels, len(ldbs), pct


def get_fmodelPctDecay_4dataName(dataname, execultables, mds):
    """Collect per-iteration fmodel/ldb statistics for every run log.

    For each (executable, md) pair, reads the NUM_ITER listing files
    ../runlogs/<dataname>_<executable>_<md>_iter<i>.txt and gathers the
    values returned by get_model_in_ldb_pct.

    Returns a nested dict: {executable: {md: {'num_fmodel': [...],
    'num_alive_fmodels': [...], 'num_ldb': [...], 'pcts': [...]}}}.
    """
    result = dict()
    for exe in execultables:
        per_md = dict()
        for md in mds:
            prefix = '../runlogs/' + dataname + '_' + exe + '_' + md + '_'
            stats = {'num_fmodel': [], 'num_alive_fmodels': [],
                     'num_ldb': [], 'pcts': []}
            for it in range(NUM_ITER):
                n_fmodel, n_alive, n_ldb, pct = \
                    get_model_in_ldb_pct(prefix + 'iter' + str(it) + '.txt')
                stats['num_fmodel'].append(n_fmodel)
                stats['num_alive_fmodels'].append(n_alive)
                stats['num_ldb'].append(n_ldb)
                stats['pcts'].append(pct)
            per_md[md] = stats
        result[exe] = per_md

    return result

# 2022-06-01 update: adaptive CPU frequency and operation counts.
# Directory holding the perf log files parsed below.
ROOT = "../evaluation"


# Obtain the CPU frequency, because timing is recorded in CPU cycles.
def get_cpu_ghz(cpuinfo_path='/proc/cpuinfo'):
    """Return the nominal CPU frequency in GHz parsed from cpuinfo.

    Looks for a "model name" line containing "@ <freq>GHz" (or MHz),
    e.g. "model name : Intel(R) ... @ 2.40GHz".

    Args:
        cpuinfo_path: file to parse; defaults to /proc/cpuinfo
            (parameterized so other files can be parsed / tested).

    Raises:
        ValueError: the frequency suffix is neither "GHz" nor "MHz".
        RuntimeError: no usable "model name ... @" line was found
            (the previous version silently returned None here).
    """
    # Plain open() instead of fileinput.input(): fileinput keeps global
    # state and the old code never closed it.
    with open(cpuinfo_path) as f:
        for line in f:
            # Require "@ " too, otherwise split(...)[1] raises IndexError
            # on model names without a frequency suffix.
            if 'model name' in line and '@ ' in line:
                value = line.split('@ ')[1].rstrip('\n')
                digit = value[:-3]
                tag = value[-3:]
                if tag == "GHz":
                    return float(digit)
                elif tag == "MHz":
                    return float(digit) / 1000.0
                else:
                    raise ValueError("unrecognized frequency tag: %r" % tag)
    raise RuntimeError("no 'model name ... @ <freq>' line in %s" % cpuinfo_path)

# Nominal CPU frequency, resolved once at import time (perf timers record
# CPU cycles, so this is needed to convert cycles to wall time).
cpu_freq_ghz = get_cpu_ghz()
# Unit-conversion factors.
S2US_FCTR = 1000000  # seconds -> microseconds
K_FCTR = 1000  # kilo
G_FCTR = 1000 * 1000 * 1000  # giga

def _parse_counts_line(line, prefix):
    """Parse one dash-separated scan line into a stats dict.

    Fields 1, 3 and 5 of `line.split('-')` are the total / read / write
    operation counts.  Keys of the returned dict are prefixed with
    `prefix` ('former' or 'latter').

    Raises:
        ValueError: total != read + write (was an `assert`, which is
            stripped under `python -O`).
    """
    parts = line.split('-')
    total = int(parts[1])
    reads = int(parts[3])
    writes = int(parts[5])
    if total != reads + writes:
        raise ValueError("count mismatch: %d != %d + %d" % (total, reads, writes))
    return {
        prefix + 'all': total,
        prefix + 'read': reads,
        prefix + 'write': writes,
        prefix + 'read_pct': (reads + 0.0) / total,
        prefix + 'write_pct': (writes + 0.0) / total,
    }


def get_numLoadRun_fromScan(filename):
    """Read load ("former") and run ("latter") op counts from a scan log.

    The first two lines of `filename` hold the load-phase and run-phase
    counts respectively.  Returns a dict with keys formerall, formerread,
    formerwrite, formerread_pct, formerwrite_pct and the corresponding
    latter* keys.
    """
    with open(filename, 'r') as f:
        line0 = f.readline()
        line1 = f.readline()

    d = _parse_counts_line(line0, 'former')
    d.update(_parse_counts_line(line1, 'latter'))
    return d


def get_opTime_periods(filename, num):
    """Collect the per-period cycle counts of timer `num` from a perf log.

    Matches lines of the form "Timer <num>: <cycles>" and returns the
    integer cycle values in file order.

    BUG FIX: the old prefix test `startswith("Timer " + str(num))` also
    matched any timer id sharing the prefix (e.g. num=4 matched
    "Timer 42: ...").  Requiring the ":" delimiter fixes that and also
    makes the explicit "Timer <num> MEAN" exclusion unnecessary.
    """
    prefix = "Timer " + str(num) + ":"
    periods = []
    with open(filename, 'r') as f:
        for line in f:
            if line.startswith(prefix):
                # int() tolerates the trailing newline, so no split('\n').
                periods.append(int(line.split(": ")[1]))

    return periods


# put was rerun without re-assigning the training metadata, so the two
# folders are kept separate when collecting statistics.
def get_perf_latency(dataname, operations):
    """Compute per-operation latency series for baseline and llsm runs.

    For every operation trace, reads timer 13 (all ops), timer 4 (reads)
    and timer 10 (writes) from the baseline and llsm perf logs, converts
    the cycle counts to microsecond latencies, and returns
    {operation: {'baseline_all': ..., 'llsm_all': ..., 'baseline_read': ...,
    'llsm_read': ..., 'baseline_write': ..., 'llsm_write': ...}}.
    Empty lists are stored when a log holds no timer-13 samples.

    Also opens the shared matplotlib figure that plot_iterationDecay
    draws into.
    """
    print("get_perf_latency of dataset %s" % dataname)
    plt.figure(figsize=[13, 9])
    perf = dict()
    # timer id per op kind; maps to the matching num_ops key below.
    timer_ids = {'all': 13, 'read': 4, 'write': 10}
    ops_key = {'all': 'all_nums', 'read': 'read_nums', 'write': 'write_nums'}
    for operation in operations:
        dataset_type = dataname + '_' + operation
        baseline_log = join(ROOT, "{}_baseline.txt".format(dataset_type))
        llsm_log = join(ROOT, "{}_llsm.txt".format(dataset_type))
        circles = {}
        for kind, tid in timer_ids.items():
            circles[('baseline', kind)] = get_opTime_periods(baseline_log, tid)
            circles[('llsm', kind)] = get_opTime_periods(llsm_log, tid)

        num_ops = get_numOps_from_perfLog("../evaluation/" + dataname + '_' + operation + '_baseline.txt')
        # assert (num_ops > 0)  — some runs may be absent when N_PERIODS is too large
        # Pure-read / pure-write traces: derive the missing counters.
        if operation == "trace_allGet_16":
            num_ops['read_nums'] = num_ops['all_nums']
            num_ops['write_nums'] = [0] * NUM_ITER
        if operation == "trace_allPut_16":
            num_ops['write_nums'] = num_ops['all_nums']
            num_ops['read_nums'] = [0] * NUM_ITER

        entry = dict()
        if circles[('baseline', 'all')]:
            for kind in timer_ids:
                counts = num_ops[ops_key[kind]]
                entry['baseline_' + kind] = circle_to_latencyInUs(np.array(circles[('baseline', kind)]), counts)
                entry['llsm_' + kind] = circle_to_latencyInUs(np.array(circles[('llsm', kind)]), counts)
        else:
            for kind in timer_ids:
                entry['baseline_' + kind] = []
                entry['llsm_' + kind] = []

        perf[operation] = entry
    return perf


def plot_iterationDecay(dataname):
    """Plot per-iteration latency decay for dataset `dataname` as a PNG.

    Draws one subplot per operation trace (all-get, all-put, real-run),
    each showing: baseline vs. llsm read latency, the baseline/llsm
    speedup ratio, the fraction of ldbs with an alive fmodel, and the
    (scaled) ldb / alive-fmodel counts as bars.  The figure is saved to
    ../evaluation_global/.
    """
    operations = ["trace_allGet_16", "trace_allPut_16", "trace_realRun_16"]
    # operations = ["trace_allGet_16", "trace_realRun_16"]

    perf_latency_decay = get_perf_latency(dataname, operations)

    mds = ['baseline', 'llsm']  # last component of the run-log path prefix
    perf_fmodelPct_decay = get_fmodelPctDecay_4dataName(dataname, operations, mds)
    # numAll, numLoad, numRun, read_number_inLoad, readPctInLoad, WriteNumInLoad, WritePctInLoad = the_dict[]
    for i in range(3):
        plt.subplot(3, 1, i+1)
        operation = operations[i]
        latency_baselines_read = perf_latency_decay[operation]['baseline_read']
        latency_llsms_read = perf_latency_decay[operation]['llsm_read']
        plt.plot(latency_baselines_read, label='read_bs', marker='^', color='b', ls='-', alpha=0.6)
        plt.plot(latency_llsms_read, label='read_llsm', marker='v', color='b', ls='--', alpha=0.4)

        # Kept for the currently-disabled "all"/"write" latency curves below.
        latency_baselines_all = perf_latency_decay[operation]['baseline_all']
        latency_llsms_all = perf_latency_decay[operation]['llsm_all']
        # plt.plot(latency_baselines_all, label='all_bs', marker='^', color='k', ls='-', alpha=0.6)
        # plt.plot(latency_llsms_all, label='all_llsm', marker='v', color='k', ls='--', alpha=0.4)

        latency_baselines_write = perf_latency_decay[operation]['baseline_write']
        latency_llsms_write = perf_latency_decay[operation]['llsm_write']
        # plt.plot(latency_baselines_write, label='write_bs', marker='^', color='r', ls='-', alpha=0.6)
        # plt.plot(latency_llsms_write, label='write_llsm', marker='v', color='r', ls='--', alpha=0.4)

        # Fraction of ldbs with an alive fmodel; the magnification factor
        # (to make the curve visible) is currently 1.
        fmodelPct_decay = perf_fmodelPct_decay[operation]['llsm']['pcts']
        fmodelPct_decay_mag = [1*fmodelPct_decay[i] for i in range(len(fmodelPct_decay))]
        plt.plot(fmodelPct_decay_mag, label='fmodelPct', marker='x', ls='--')

        ops = get_numOps_from_perfLog("../evaluation/" + dataname + '_' + operation + '_baseline.txt')
        xname_list = ops['read_ratios']
        plt.xticks(np.arange(len(xname_list)), ["%.0f" % (it*100) for it in xname_list])
        # for j_ in range(len(fmodelPct_decay_mag)):
        #     plt.text(j_-0.2, -0.4, '%.1f%%' %(fmodelPct_decay[j_]*100))

        # Per-iteration speedup of llsm over baseline; -1 when no latency data.
        if isinstance(latency_baselines_read, np.ndarray):
            read_fasters = [latency_baselines_read[i] / latency_llsms_read[i] for i in range(len(latency_baselines_read))]
        else:
            read_fasters = [-1]
        read_fasters_mag = [1*read_fasters[i] for i in range(len(read_fasters))]
        plt.plot(read_fasters_mag, label='read fasters', marker='d', ls='-', color='red')
        for j_ in range(len(read_fasters_mag)):
            if j_ % 2 == 0:
                plt.text(j_-0.2, read_fasters_mag[j_], '%.2f' %read_fasters[j_])

        read_faster_mean = np.array(read_fasters_mag).mean()
        plt.axhline(y=read_faster_mean, color='red', alpha=0.7, ls='--')
        plt.text(-2.5, read_faster_mean, '%.2f' % read_faster_mean, color='red')

        # Scale the count bars so the tallest reaches about 4.5 on the
        # latency axis.  BUG FIX: the original `np.max(np.array(a), b)`
        # passed the second maximum as np.max's positional `axis` argument
        # (an AxisError for most values); use the builtin max over the two
        # maxima instead.
        num_ldb = perf_fmodelPct_decay[operation]['llsm']['num_ldb']
        num_alive_fmodels = perf_fmodelPct_decay[operation]['llsm']['num_alive_fmodels']
        PLOT_MAX = max(np.max(np.array(num_ldb)), np.max(np.array(num_alive_fmodels)))
        # Guard against all-zero counts (avoid ZeroDivisionError).
        PLOT_MAG = 4.5 / PLOT_MAX if PLOT_MAX > 0 else 1.0

        num_ldb_plot = np.array(num_ldb)*PLOT_MAG
        plt.bar(range(len(num_ldb)), num_ldb_plot, label='num_ldb', alpha=0.6)
        # for j_ in range(len(num_ldb)):
        #     plt.text(j_-0.2, num_ldb_plot[j_], '%.0f' %num_ldb[j_], color='b')

        num_alive_fmodels_plot = np.array(num_alive_fmodels)*PLOT_MAG
        plt.bar(range(len(num_alive_fmodels)), num_alive_fmodels_plot, label='num_alive_fmodels', alpha=0.6)
        # for j_ in range(len(num_alive_fmodels)):
        #     plt.text(j_-0.2, num_alive_fmodels_plot[j_], '%.0f' %num_alive_fmodels[j_], color='b')

        title = operation + '. xticks: READ percent'
        plt.axhline(y=1, color='gray', alpha=0.5)
        # plt.xlim(-0.5, 12)
        # plt.ylim(-1.2, 5.5)
        plt.xlabel(title)
        plt.ylabel('latency in microseconds')
        if i == 1:
            plt.legend()

    # `operation` / `ops` deliberately carry the last loop iteration's values.
    perf_raw_path = "../evaluation/" + dataname + '_' + operation + "_baseline.txt"
    num_ops_period = ops['all_nums'][0]
    the_d = get_numLoadRun_fromScan(perf_raw_path)
    plt.suptitle("%s. Load %.1fM, Read %.1fM(%.0f%%), Write %.1fM(%.0f%%)--Run %.1fM, Read %.1fM(%.0f%%), Write %.1fM(%.0f%%)--opsPeriod: %d"
                 % (dataname, the_d['formerall']/1000000, the_d['formerread']/1000000, the_d['formerread_pct']*100, the_d['formerwrite']/1000000, the_d['formerwrite_pct']*100,
                    the_d['latterall']/1000000, the_d['latterread']/1000000, the_d['latterread_pct']*100, the_d['latterwrite']/1000000, the_d['latterwrite_pct']*100, num_ops_period))

    plt.savefig('../evaluation_global/iterationDecay_' + dataname + "_opsPeriod_" + str(num_ops_period) + '.png')
    # plt.show()
    plt.close()


if __name__ == '__main__':
    # CLI entry point: the single positional argument is the dataset name.
    dataname = sys.argv[1]
    plot_iterationDecay(dataname)
