# -*- coding: utf-8 -*-


import numpy as np
from glob import glob
import os
import torch as pt
#    0     1     2     3
#  rows, cols, nnz, sparsity, 
#    4              5           6            7
# avr_nnz_row, min_nnz_row, max_nnz_row, var_nnz_row,
#    8              9           10          11 
# avr_nnz_col, min_nnz_col, max_nnz_col, var_nnz_col, 
# max_attribute = np.array([118142155.0, 118142155.0, 1949412601.0, 1.0, 43758.0, 43758.0, 2312481.0, 225988363.5, 
#                           4779.759766, 182.0, 8563808.0, 46950646.81])
# min_attribute = np.array([1.0, 2.0, 1.0, 4.17e-08, 0.597731173, 0.0, 1.0, 0.0, 0.13333334, 0.0, 1.0, 0.0])


# max_attribute = np.array([118142155.0, 118142155.0, 1949412601.0, 1.0, 43758.0, 43758.0, 2312481.0, 225988363.5, 4779.759766, 182.0, 8563808.0, 
#                           46950646.81, 41312696.0, 0.100000009, 6259328.53, 259462.5156, 27659673.0])
# min_attribute = np.array([1.0, 2.0, 1.0, 4.17e-08, 0.597731173, 0.0, 1.0, 0.0, 0.13333334,
#                           0.0, 1.0, 0.0, 1.0, 2.35e-07, 1.0, 1.0, 0.0])


# max_profiling = np.array([890.8, 990.868, 13340438.4, 93.2, 93.25800000000001, 995.894, 88.178, 66.226, 13112995.108, 22.26, 1.388, 0.78, 35.448, 1.418, 35.448, 965.2700000000001, 66.226, 93.25800000000001, 75.922, 79.954, 22.26, 39.922000000000004, 0.39799999999999996, 99.5, 11.938, 0.53, 2064.874, 2093.172, 32.0, 31.49, 536235.9, 171595489.0, 536308.4820000001, 171618714.8, 1024.0, 0.0, 4096.0, 40.0, 0.0, 0.0, 0.0, 0.0, 4194304.0, 51.2, 32.0, 48.0, 32.0, 64.0, 48.0, 75.0, 74.61200000000001, 47.751999999999995, 0.17, 16092454.0, 100.0, 0.0])
# min_profiling = np.array([516.582, 1.0, 3714.0, 0.3579993, 0.036, 1.01, 0.8220000000000001, 0.098, 790.26, 0.17, 0.02, 0.01, 0.49399999999999994, 0.02, 0.49399999999999994, 1.1300000000000001, 0.35799999999999993, 0.35799999999999993, 0.0, 0.21000000000000002, 0.158, 0.5, 0.01, 60.077999999999996, 0.9179999999999999, 0.01, 12.148, 12.279999999999998, 30.329999999999995, 22.97, 8.75, 2800.0, 10.53, 3368.0, 32.0, 0.0, 128.0, 40.0, 0.0, 0.0, 0.0, 0.0, 4096.0, 0.05, 32.0, 1.0, 32.0, 2.0, 25.0, 39.06, 1.636, 1.0459999999999998, 0.03, 249.0, 99.87, 0.0])

# Per-feature normalization statistics for a 55-dimensional profiling vector.
# NOTE(review): presumably min/max/mean/std of GPU profiling counters collected
# over the training matrices, used for min-max or z-score feature scaling —
# TODO confirm the exact feature order against the pipeline that produces them.
# Per-feature maximum observed value.
max_profiling = np.array([984.47, 999.96, 1748351.0, 89.58, 89.58, 865.82, 64.05, 47.66, 1697390.25, 29.26, 1.62, 1.04, 41.14, 1.65, 41.14, 799.95, 47.66, 89.58, 77.83, 75.84, 29.26, 44.09, 0.44, 99.8, 15.98, 0.74, 6593.69, 6672.66, 32.0, 31.61, 45642.79, 14605693.0, 45701.39, 14624445.0, 1024.0, 0.0, 4096.0, 32.0, 0.0, 0.0, 0.0, 0.0, 4194304.0, 25.6, 32.0, 64.0, 32.0, 64.0, 64.0, 100.0, 97.13, 62.16, 0.18, 729909.0, 100.0, 0.0])
# Per-feature minimum observed value.
min_profiling = np.array([1.0, 1.0, 5185.0, 1.34, 0.78, 1.16, 1.36, 1.34, 1412.75, 0.58, 0.01, 0.01, 0.2, 0.01, 0.2, 7.14, 1.34, 0.9, 0.42, 1.37, 0.58, 0.2, 0.0, 55.91, 0.91, 0.0, 14.36, 14.68, 31.75, 23.47, 104.0, 33280.0, 107.58, 34427.0, 32.0, 0.0, 128.0, 32.0, 0.0, 0.0, 0.0, 0.0, 4096.0, 0.05, 32.0, 2.0, 32.0, 2.0, 32.0, 50.0, 2.14, 1.37, 0.02, 1486.0, 99.95, 0.0])
# Per-feature mean.
mean_profiling = np.array([772.4794078125016, 172.02368750000014, 47056.0340625, 48.586326562500055, 48.13060156250003, 40.48111718750003, 28.039149999999946, 23.084654687499985, 40517.439442187475, 9.006404687499971, 0.4752328124999996, 0.3228562499999911, 12.231915625000035, 0.4892937500000008, 12.231915625000035, 380.9740406249997, 24.187701562499942, 48.42045000000008, 46.698923437500014, 30.524678124999987, 8.871343749999994, 12.719534375000022, 0.12722343750000595, 87.28046562500008, 7.926503124999974, 0.24064687499999926, 90.11755156249995, 91.65643750000007, 31.992018750000014, 28.444246874999873, 3200.9084468749957, 1024290.7521875, 3233.285934375, 1034651.51671875, 468.235, 0.0, 1685.92, 32.0, 0.0, 0.0, 0.0, 0.0, 777103.36, 5.386084375000004, 32.0, 7.6684375, 32.0, 7.6684375, 57.84828125, 90.38850468749972, 49.21277031249997, 31.496201562500016, 0.09495156249999898, 81485.43265625, 99.9998624999999, 0.0])
# Per-feature standard deviation (note: several entries are 0.0 — dividing by
# this array without guarding would produce inf/NaN for constant features).
std_profiling = np.array([60.858150525750666, 367.020546266097, 86710.32735697541, 24.409268275395554, 24.87226408316093, 66.77274168344275, 9.081741115212955, 10.16596217959765, 83205.69330525477, 4.096157017022531, 0.25338053555539863, 0.1506704743336834, 6.615139108448974, 0.26460784892163824, 6.615139108448974, 203.3929408312537, 9.289595650931755, 24.574266744859305, 15.397041494850185, 15.883603495118654, 4.1115365673110515, 7.1596776748812845, 0.0716647582545537, 7.159677674881288, 3.7028772035223168, 0.1275765223414348, 167.7980135148231, 170.14479532490074, 0.014652846086596737, 1.9628212145538713, 4912.986289090835, 1572155.591376964, 4914.576247306461, 1572664.4154884878, 275.2900738766306, 0.0, 1186.630925604082, 0.0, 0.0, 0.0, 0.0, 0.0, 775476.7501962582, 5.548795244724822, 0.0, 10.702222493883898, 0.0, 10.702222493883898, 7.011660835415562, 10.955667694094174, 23.106148409238298, 14.787954298600535, 0.033255603344527346, 97500.42589829497, 0.0013738063000295713, 0.0])


def group_sparse_matrix(sparse_name_list):
    """Partition matrix names into 5 size/regularity groups.

    Reads one membership file per group from data/ (one matrix name per
    line) and returns a list of 5 lists, where entry ``step`` holds the
    indices into ``sparse_name_list`` of the matrices found in group
    ``step``'s file.  A name present in several files is reported in each.

    Parameters
    ----------
    sparse_name_list : sequence of str
        Matrix names to classify.

    Returns
    -------
    list[list[int]]
        Five index lists (the 5th stays empty while the small_regular
        group file is disabled below).
    """
    group_files = [
        "data/data_large_irregular.txt",
        "data/data_middle_irregular.txt",
        "data/data_small_irregular.txt",
        "data/data_large_regular.txt",
        "data/data_middle_regular.txt",
        # "data/data_small_regular.txt",  # group 5 currently disabled
    ]

    # `with` guarantees the handles are closed (the previous version called
    # `f.close` without parentheses, which never actually closed the files).
    group_list_name = []
    for path in group_files:
        with open(path) as f:
            group_list_name.append([line.strip('\n') for line in f])

    # Sets give O(1) membership tests instead of O(n) list scans.
    group_sets = [set(names) for names in group_list_name]

    group_list_id = [[] for _ in range(5)]
    for s_id, sparse_name in enumerate(sparse_name_list):
        for step, names in enumerate(group_sets):
            if sparse_name in names:
                group_list_id[step].append(s_id)

    return group_list_id


# f = open(f'sample_data_400.txt')
# sparse_name_list = []
# for line_id, line in enumerate(f.readlines()):
#     sparse_name = line.strip('\n')
#     sparse_name_list.append(sparse_name)
# f.close
# print("#matrix size: ", len(sparse_name_list))
# sparse_name_list = sparse_name_list[160:]
# group_list_id = group_sparse_matrix(sparse_name_list)
# length = 0
# for i in range(4):
#     length += len(group_list_id[i])
#     print(len(group_list_id[i]))
# print(length)
# for i in range(4):
#     print(group_list_id[i])



# def scale_back_01(action):
#     act_k = (1 - 0)/ 2.
#     act_b = (1 + 0)/ 2.
#     return act_k * action + act_b

# def scale_from_01(action):
#     act_k_inv = 2./(1 - 0)
#     act_b = (1 + 0)/ 2.
#     return act_k_inv * (action - act_b)

def find_nearest(array, value):
    """Return the element of *array* whose value is closest to *value*.

    Ties are resolved in favor of the earliest index (argmin behavior).
    """
    arr = np.asarray(array)
    distances = np.abs(arr - value)
    return arr[distances.argmin()]

                
    
import subprocess
import time
 
def run_cmd(cmd_string, timeout=10):
    """Run a shell command and capture its combined stdout+stderr.

    Parameters
    ----------
    cmd_string : str
        Command line executed through the shell.
    timeout : int or float, optional
        Seconds to wait; a falsy value (0/None) waits forever.

    Returns
    -------
    (int, str)
        Always ``(0, output)`` on completion; the result code is kept for
        interface compatibility with existing callers.

    Raises
    ------
    Exception
        If the command does not finish within ``timeout`` seconds.

    NOTE(review): ``shell=True`` on a caller-supplied string is a shell-
    injection risk — only trusted command strings may be passed here.
    """
    p = subprocess.Popen(cmd_string, stderr=subprocess.STDOUT,
                         stdout=subprocess.PIPE, shell=True)
    try:
        # communicate() replaces the previous 0.1 s busy-wait poll loop.
        out, _ = p.communicate(timeout=timeout or None)
    except subprocess.TimeoutExpired:
        p.terminate()
        p.wait()  # reap the child so it does not linger as a zombie
        msg = "Timeout :Command '" + cmd_string + "' timed out after " + str(timeout) + " seconds"
        raise Exception(msg)
    return 0, out.decode('utf-8')

def move_to_mnt_file(sparse_name):
    """Stage a matrix's binary COO files into the /mnt/55G scratch directory.

    Clears /mnt/55G, measures the total size of the matrix's
    ``*_binary_coo*`` files, and copies them over unless they exceed 39 GiB.

    Parameters
    ----------
    sparse_name : str
        Matrix directory name under data/matrix_suite_tar/.

    Returns
    -------
    torch.Tensor
        A CUDA bool tensor: True if the files were staged, False otherwise
        (too large, copy produced output, or the copy raised).
    """
    # Wipe the staging directory before copying the new matrix in.
    ret_code, msg = run_cmd('rm /mnt/55G/*', 20)

    file_path = f'data/matrix_suite_tar/{sparse_name}/{sparse_name}_binary_coo*'

    # Total size (GiB) of every file matching the pattern.
    size_of_file = 0
    for fn in glob(file_path):
        size_of_file += os.stat(fn).st_size / (1024 * 1024 * 1024)

    # Refuse matrices that would not fit the 55G mount (39 GiB safety cap).
    if size_of_file > 39:
        return pt.tensor(False).cuda()

    try:
        move_instruction = f'cp {file_path} /mnt/55G/'
        ret_code, msg = run_cmd(move_instruction, 30)
        # run_cmd merges stderr into stdout, so any output means cp failed.
        if len(msg) != 0:
            return pt.tensor(False).cuda()
    except Exception:
        # Best-effort: a timeout or any other failure counts as "not staged".
        # (Exception is narrower than the previous bare except:, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        return pt.tensor(False).cuda()

    return pt.tensor(True).cuda()



def run_cmd_runtime_out(cmd_string, timeout=10):
    """Run a shell command and capture its combined stdout+stderr.

    NOTE(review): this is a byte-for-byte duplicate of ``run_cmd`` in this
    module; kept self-contained for callers that import it directly, but the
    two should eventually be merged.

    Parameters
    ----------
    cmd_string : str
        Command line executed through the shell.
    timeout : int or float, optional
        Seconds to wait; a falsy value (0/None) waits forever.

    Returns
    -------
    (int, str)
        Always ``(0, output)`` on completion.

    Raises
    ------
    Exception
        If the command does not finish within ``timeout`` seconds.
    """
    p = subprocess.Popen(cmd_string, stderr=subprocess.STDOUT,
                         stdout=subprocess.PIPE, shell=True)
    try:
        # communicate() replaces the previous 0.1 s busy-wait poll loop.
        out, _ = p.communicate(timeout=timeout or None)
    except subprocess.TimeoutExpired:
        p.terminate()
        p.wait()  # reap the child so it does not linger as a zombie
        msg = "Timeout :Command '" + cmd_string + "' timed out after " + str(timeout) + " seconds"
        raise Exception(msg)
    return 0, out.decode('utf-8')


def containsNanArgsort(List):
    """Argsort-style ranks for a list that may contain NaN values.

    Each element of ``List`` is replaced by its rank in ascending sorted
    order; NaN entries get rank -1.  (The original comments were
    encoding-garbled Chinese describing exactly this contract.)

    Note: duplicate values all receive the rank of their last occurrence in
    the sorted order, because ranks are stored in a value -> rank mapping.

    Parameters
    ----------
    List : sequence of float
        Values to rank; may contain NaN.

    Returns
    -------
    numpy.ndarray
        Integer ranks, with -1 marking NaN positions.
    """
    # np.argsort places NaN entries at the end of the sort order.
    order = np.argsort(List)
    # Pair each value with its rank, dropping the NaN entries.
    ranked = ((List[i], rank) for rank, i in enumerate(order))
    rank_of = dict(pair for pair in ranked if not np.isnan(pair[0]))
    # Single dict lookup per element (was `in res.keys()` + `res[i]`).
    return np.array([rank_of.get(v, -1) for v in List])


def priority_binary_buffer_insert(data_buffer, data_tmp, data_list, buffer_size):
    """Insert ``data_list`` into ``data_buffer`` by priority, then truncate.

    ``data_buffer`` is kept ordered by each element's index-5 field
    (descending); a binary search locates the slot for priority ``data_tmp``
    and every item of ``data_list`` is inserted there.  The buffer is mutated
    in place and the first ``buffer_size`` elements are returned.

    Note: because every item is inserted at the same fixed index,
    ``data_list`` ends up in reversed order at the insertion point.
    """
    # Empty buffer: just take the new items wholesale.
    if not data_buffer:
        data_buffer.extend(data_list)
        return data_buffer[:buffer_size]

    lo, hi = 0, len(data_buffer)
    while lo + 1 < hi:
        mid = (lo + hi) // 2
        if data_buffer[mid][5] >= data_tmp:
            lo = mid
        else:
            hi = mid

    # Strictly-greater element at `lo` -> new items go after it (index hi);
    # otherwise they go at `lo` itself, ahead of equal-priority entries.
    insert_at = hi if data_buffer[lo][5] > data_tmp else lo
    for item in data_list:
        data_buffer.insert(insert_at, item)

    return data_buffer[:buffer_size]

'''
priority_data_buffer = [3]

priority_binary_buffer_append(priority_data_buffer, 2, 5)
print(priority_data_buffer)

priority_binary_buffer_append(priority_data_buffer, 2, 5)
print(priority_data_buffer)

priority_binary_buffer_append(priority_data_buffer, 3, 5)
print(priority_data_buffer)

'''
    

