

import h5py
import numpy as np
import os
import re
import torch as pt
from constant import min_profiling, max_profiling
from constant import mean_profiling, std_profiling
from constant import run_cmd, run_cmd_runtime_out
import time

import sys 
sys.path.append("..") 

# Seconds to pause after each external command launch, giving the GPU /
# filesystem time to settle before the next run or log read.
time_sleep = 1.0



# def next_step_back_mp_v3(sparse_name, nnz_number, formats, parameters, rank, world_size):
    
#     save_path = '../test'
#     format_selection = ['coo_atom_new', 'csr_unified']
    
#     runtime_record_name = f'{save_path}/runtime_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log'
#     if not os.path.exists(runtime_record_name):
#         runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size} ./{format_selection[formats]} \
#                             ../data/matrix_suite_tar/{sparse_name}/{sparse_name}.mtx  \
#                             {parameters[0]} {parameters[1]} {parameters[2]} \
#                             > {save_path}/runtime_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log 2>&1'
#         os.system(runtime_record)
    
#     profiling_txt_name = f'{save_path}/report_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log'
#     if not os.path.exists(profiling_txt_name):
#         profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size} /usr/local/cuda-11.7/bin/ncu \
#                             -s 4 -c 1  --set full  \
#                             ./{format_selection[formats]} data/matrix_suite_tar/{sparse_name}/{sparse_name}.mtx {nnz_number} \
#                             {parameters[0]} {parameters[1]} {parameters[2]}  \
#                             > {save_path}/report_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log 2>&1'
#         os.system(profiling_txt)

#     ###################
#     f = open(f'{save_path}/report_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log')
#     profiling_feature = []
#     if_data_line = False

#     for line_id, line in enumerate(f.readlines()):
#         # try:
        
#         # if (line_id < 25):  continue
#         if (line[:8] == '    ----' and len(line.split(' ')) == 7):
#             if_data_line = not if_data_line
#             continue
        
#         if (if_data_line == True):
#             line_tmp = line.split()[-1]
#             line_tmp = line_tmp.replace(",", "")
#             if (line_tmp == 'cudaFuncCachePreferNone'):
#                 profiling_feature.append(float(0))
#             elif (line_tmp == 'cudaFuncCachePreferShared'):
#                 profiling_feature.append(float(1))
#             elif (line_tmp == 'cudaFuncCachePreferL1'):
#                 profiling_feature.append(float(2))
#             elif (line_tmp == 'cudaFuncCachePreferEqual'):
#                 profiling_feature.append(float(3))
            
#             else:
#                 profiling_feature.append(float(line_tmp))
        
#         # except:
#         #     print(line_id)
#         #     input()
#     f.close

#     profiling_feature = np.array(profiling_feature).reshape((5, -1))
#     profiling_feature = np.mean(profiling_feature, axis=0)  # 62 features
#     # print(profiling_feature.tolist())
    
#     profiling_feature = (profiling_feature - min_profiling) / (max_profiling - min_profiling + 1e-5)


#     f = open(f'python_log_test/runtime_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log')
#     gflops = 0
#     for line_id, line in enumerate(f.readlines()):
#         if (line_id == 3):
#             numbers = re.findall(r"\d+\.?\d*", line)
#             gflops = float(numbers[1])
#     f.close
#     assert gflops != 0

#     ###################
    
#     return profiling_feature, gflops

# def next_step_back_stable_mp_v3(sparse_name, nnz_number, formats, parameters, rank, world_size):
    
#     format_selection = ['coo_atom_new', 'csr_unified']

#     runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size} /home/wangyaoyu/SPMV_practice/{format_selection[formats]} \
#                         /home/wangyaoyu/SPMV_practice/data/matrix_suite_tar/{sparse_name}/{sparse_name}.mtx {nnz_number}  \
#                         {parameters[0]} {parameters[1]} {parameters[2]} \
#                         > /home/wangyaoyu/SPMV_practice/python_log_test/runtime_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log 2>&1'
#     os.system(runtime_record)
#     profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size} /usr/local/cuda-11.7/bin/ncu \
#                         -s 1000 -c 5  --set full  \
#                         ./{format_selection[formats]} data/matrix_suite_tar/{sparse_name}/{sparse_name}.mtx {nnz_number} \
#                         {parameters[0]} {parameters[1]} {parameters[2]}  \
#                         > python_log_test/report_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log 2>&1'
#     os.system(profiling_txt)

#     ###################
#     f = open(f'python_log_test/report_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log')
#     profiling_feature = []
#     if_data_line = False

#     for line_id, line in enumerate(f.readlines()):
#         # try:
        
#         # if (line_id < 25):  continue
#         if (line[:8] == '    ----' and len(line.split(' ')) == 7):
#             if_data_line = not if_data_line
#             continue
        
#         if (if_data_line == True):
#             line_tmp = line.split()[-1]
#             line_tmp = line_tmp.replace(",", "")
#             if (line_tmp == 'cudaFuncCachePreferNone'):
#                 profiling_feature.append(float(0))
#             elif (line_tmp == 'cudaFuncCachePreferShared'):
#                 profiling_feature.append(float(1))
#             elif (line_tmp == 'cudaFuncCachePreferL1'):
#                 profiling_feature.append(float(2))
#             elif (line_tmp == 'cudaFuncCachePreferEqual'):
#                 profiling_feature.append(float(3))
            
#             else:
#                 profiling_feature.append(float(line_tmp))
        
#         # except:
#         #     print(line_id)
#         #     input()
#     f.close

#     profiling_feature = np.array(profiling_feature).reshape((5, -1))
#     profiling_feature = np.mean(profiling_feature, axis=0)  # 62 features
#     # print(profiling_feature.tolist())
    
#     profiling_feature = (profiling_feature - min_profiling) / (max_profiling - min_profiling + 1e-5)


#     f = open(f'python_log_test/runtime_{sparse_name}_{format_selection[formats]}_{parameters[0]}_{parameters[1]}_{parameters[2]}.log')
#     gflops = 0
#     for line_id, line in enumerate(f.readlines()):
#         if (line_id == 3):
#             numbers = re.findall(r"\d+\.?\d*", line)
#             gflops = float(numbers[1])
#     f.close
#     assert gflops != 0

#     ###################
    
#     return profiling_feature, pt.tensor(gflops).cuda()


# def next_step_back_mp_csr(sparse_name, block_num, thread_num, element_num, rank, world_size):
    
#     save_path = ''
#     runtime_record_name = f'{save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
#     if not os.path.exists(runtime_record_name):        
#         runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size} ./csr_unified  \
#                             /mnt/5G/{sparse_name} \
#                             {block_num} {thread_num} {element_num}\
#                             > {save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
#         # os.system(runtime_record)
#         run_cmd(runtime_record)
    
#     profiling_txt_name = f'python_log_test_csr/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
#     if not os.path.exists(profiling_txt_name):
#         profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size} /usr/local/cuda-11.7/bin/ncu \
#                             -s 4 -c 1  --set full  \
#                             ./csr_unified_profiling /mnt/5G/{sparse_name} \
#                             {block_num} {thread_num} {element_num} \
#                             > python_log_test_csr/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
#         # os.system(profiling_txt)
#         run_cmd(profiling_txt)
    
#     ###################
#     f = open(f'python_log_test_csr/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log')
#     profiling_feature = []
#     if_data_line = False

#     for line_id, line in enumerate(f.readlines()):
#         # try:
        
#         # if (line_id < 25):  continue
#         if (line[:8] == '    ----' and len(line.split(' ')) == 7):
#             if_data_line = not if_data_line
#             continue
        
#         if (if_data_line == True):
#             line_tmp = line.split()[-1]
#             line_tmp = line_tmp.replace(",", "")
#             if (line_tmp == 'cudaFuncCachePreferNone'):
#                 profiling_feature.append(float(0))
#             elif (line_tmp == 'cudaFuncCachePreferShared'):
#                 profiling_feature.append(float(1))
#             elif (line_tmp == 'cudaFuncCachePreferL1'):
#                 profiling_feature.append(float(2))
#             elif (line_tmp == 'cudaFuncCachePreferEqual'):
#                 profiling_feature.append(float(3))
            
#             else:
#                 profiling_feature.append(float(line_tmp))
        
#         # except:
#         #     print(line_id)
#         #     input()
#     f.close

#     profiling_feature = np.array(profiling_feature).reshape((1, -1))
#     profiling_feature = np.mean(profiling_feature, axis=0)  # 62 features
#     # print(profiling_feature.tolist())
#     profiling_feature = (profiling_feature - min_profiling) / (max_profiling - min_profiling + 1e-5)

#     # try:
#     f = open(f'python_log_test_csr/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log')
#     gflops = 0
#     for line_id, line in enumerate(f.readlines()):
#         if (line_id == 3):
#             numbers = re.findall(r"\d+\.?\d*", line)
#             gflops = float(numbers[1])
#     f.close
#     assert gflops != 0
#     assert gflops < 500
#     # except:
#     #     print(runtime_record_name)
#     #     print(f'python_log_test_csr/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log')
#     #     print(gflops)
#         # input()

#     ###################
    
#     return profiling_feature, pt.tensor(gflops).cuda()

# def next_step_back_stable_mp_csr(sparse_name, block_num, thread_num, element_num, rank, world_size):
    

#     runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size} ./csr_unified \
#                          /mnt/5G/{sparse_name}  \
#                          {block_num} {thread_num} {element_num} \
#                         >  python_log_test_csr/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
#     # os.system(runtime_record)
#     run_cmd(runtime_record)
#     profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size} /usr/local/cuda-11.7/bin/ncu \
#                         -s 4 -c 1  --set full  \
#                         ./csr_unified_profiling /mnt/5G/{sparse_name} \
#                          {block_num} {thread_num} {element_num}  \
#                         > python_log_test_csr/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
#     # os.system(profiling_txt)
#     run_cmd(profiling_txt)
    
#     ###################
#     f = open(f'python_log_test_csr/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log')
#     profiling_feature = []
#     if_data_line = False

#     for line_id, line in enumerate(f.readlines()):
#         # try:
        
#         # if (line_id < 25):  continue
#         if (line[:8] == '    ----' and len(line.split(' ')) == 7):
#             if_data_line = not if_data_line
#             continue
        
#         if (if_data_line == True):
#             line_tmp = line.split()[-1]
#             line_tmp = line_tmp.replace(",", "")
#             if (line_tmp == 'cudaFuncCachePreferNone'):
#                 profiling_feature.append(float(0))
#             elif (line_tmp == 'cudaFuncCachePreferShared'):
#                 profiling_feature.append(float(1))
#             elif (line_tmp == 'cudaFuncCachePreferL1'):
#                 profiling_feature.append(float(2))
#             elif (line_tmp == 'cudaFuncCachePreferEqual'):
#                 profiling_feature.append(float(3))
            
#             else:
#                 profiling_feature.append(float(line_tmp))
        
#         # except:
#         #     print(line_id)
#         #     input()
#     f.close

#     profiling_feature = np.array(profiling_feature).reshape((1, -1))
#     profiling_feature = np.mean(profiling_feature, axis=0)  # 62 features
#     # print(profiling_feature.tolist())
    
#     profiling_feature = (profiling_feature - min_profiling) / (max_profiling - min_profiling + 1e-5)

#     # try:
#     f = open(f'python_log_test_csr/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log')
#     gflops = 0
#     for line_id, line in enumerate(f.readlines()):
#         if (line_id == 3):
#             numbers = re.findall(r"\d+\.?\d*", line)
#             gflops = float(numbers[1])
#     f.close
#     assert gflops != 0
#     assert gflops < 500
#     # except:
#     #     print(f'python_log_test_csr/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log')
#     #     print(gflops)
#         # input()

#     ###################
    
#     return profiling_feature, pt.tensor(gflops).cuda()


# def next_step_back_mp_coo_large_scale_runtime_out(sparse_name, block_num, thread_num, element_num, rank, world_size):
    
#     if_runtime_out = False
    
#     runtime_record_name = f'python_log_test_coo_large_scale/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
#     if not os.path.exists(runtime_record_name):
#         runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size} ./coo_atom_binary  \
#                             data/matrix_suite_tar/{sparse_name}/{sparse_name} \
#                             {block_num} {thread_num} {element_num}\
#                             > python_log_test_coo_large_scale/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
#         # os.system(runtime_record)
#         res_code, msg = run_cmd_runtime_out(runtime_record)
    
#     if (len(msg) != 0):
#         if (msg[:18] == "Timeout :Command '"):
#             if_runtime_out = True
#             profiling_feature = []
#             gflops = 0.1
#             return if_runtime_out, profiling_feature, pt.tensor(gflops).cuda()
            
            
#     profiling_txt_name = f'python_log_test_coo_large_scale/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
#     if not os.path.exists(profiling_txt_name):
#         profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size} /usr/local/cuda-11.7/bin/ncu \
#                             -s 4 -c 1  --set full  \
#                             ./coo_atom_binary_profiling  data/matrix_suite_tar/{sparse_name}/{sparse_name} \
#                             {block_num} {thread_num} {element_num} \
#                             > python_log_test_coo_large_scale/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
#         # os.system(profiling_txt)
#         res_code, msg = run_cmd_runtime_out(profiling_txt, 20)
    
#     if (len(msg) != 0):
#         if (msg[:18] == "Timeout :Command '"):
#             if_runtime_out = True
#             profiling_feature = []
#             gflops = 0.1
#             return if_runtime_out, profiling_feature, pt.tensor(gflops).cuda()
        
    
#     return if_runtime_out, profiling_feature, pt.tensor(gflops).cuda()


def next_step_back_mp_coo_large_scale_twice_timeout(sparse_name, block_num, thread_num, element_num, rank, world_size, base = 0):
    """Run (with on-disk caching) a timed COO SpMV execution plus an ncu
    profiling pass, then parse normalized profiling features and GFLOPS.

    The runtime binary and the ncu profiler are only invoked when their
    respective log files are missing under ``save_path``; otherwise the
    cached logs are parsed directly.

    Args:
        sparse_name: matrix name under ../data/matrix_suite_tar/<name>/.
        block_num, thread_num, element_num: kernel launch parameters.
        rank, world_size, base: summed to select the CUDA device index.

    Returns:
        (profiling_feature, gflops): z-score-normalized 1-D numpy array of
        profiling metrics, and the measured GFLOPS as a float.

    Raises:
        AssertionError: if no non-zero GFLOPS value is found on line index 5
            of the runtime log.
    """
    save_path = '../python_log_coo_twice'

    runtime_record_name = f'{save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
    if not os.path.exists(runtime_record_name):
        # NOTE(review): arguments are interpolated directly into a shell
        # command — safe only while sparse_name / parameters are trusted.
        runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size + base} ./coo_atom_binary_twice_timeout  \
                            ../data/matrix_suite_tar/{sparse_name}/{sparse_name}  \
                            {block_num} {thread_num} {element_num} \
                            > {save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
        run_cmd(runtime_record)
        time.sleep(time_sleep)

    profiling_txt_name = f'{save_path}/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
    if not os.path.exists(profiling_txt_name):
        profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size + base} /usr/local/cuda-11.7/bin/ncu \
                            -s 4 -c 1  --set full  \
                            ./coo_atom_binary_profiling  ../data/matrix_suite_tar/{sparse_name}/{sparse_name} \
                            {block_num} {thread_num} {element_num}  \
                            > {save_path}/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
        run_cmd(profiling_txt)
        time.sleep(time_sleep)

    # ---- parse GFLOPS from the runtime log (fixed line index 5) ----
    # BUG FIX: the original did `f.close` without parentheses, so file
    # handles were never closed; `with` guarantees closure.
    gflops = 0
    with open(runtime_record_name) as f:
        for line_id, line in enumerate(f):
            if line_id == 5:
                numbers = re.findall(r"\d+\.?\d*", line)
                gflops = float(numbers[1])  # second number on that line
    assert gflops != 0

    # ---- parse the ncu report into a flat feature vector ----
    # Rows beginning with '    ----' delimit metric tables; toggle a flag so
    # only rows inside a table contribute a feature.
    cache_pref_codes = {
        'cudaFuncCachePreferNone': 0.0,
        'cudaFuncCachePreferShared': 1.0,
        'cudaFuncCachePreferL1': 2.0,
        'cudaFuncCachePreferEqual': 3.0,
    }
    profiling_feature = []
    if_data_line = False
    with open(profiling_txt_name) as f:
        for line in f:
            if line[:8] == '    ----' and len(line.split(' ')) == 7:
                if_data_line = not if_data_line
                continue
            if if_data_line:
                # Last token on the row is the metric value; strip thousands
                # separators, then map enum names to ordinal codes.
                line_tmp = line.split()[-1].replace(",", "")
                if line_tmp in cache_pref_codes:
                    profiling_feature.append(cache_pref_codes[line_tmp])
                else:
                    profiling_feature.append(float(line_tmp))

    profiling_feature = np.array(profiling_feature).reshape((1, -1))
    profiling_feature = np.mean(profiling_feature, axis=0)  # 62 features
    # z-score normalization with dataset-wide stats from constant.py
    profiling_feature = (profiling_feature - mean_profiling) / (std_profiling + 1e-6)

    return profiling_feature, gflops


def next_step_back_stable_mp_coo_large_scale_twice_timeout(sparse_name, block_num, thread_num, element_num, rank, world_size, base = 0):
    """Uncached ("stable") variant: ALWAYS rerun the timed COO SpMV execution
    and the ncu profiling pass, then parse normalized features and GFLOPS.

    Unlike ``next_step_back_mp_coo_large_scale_twice_timeout`` this does not
    check for existing log files, so each call produces fresh measurements.

    Args:
        sparse_name: matrix name under ../data/matrix_suite_tar/<name>/.
        block_num, thread_num, element_num: kernel launch parameters.
        rank, world_size, base: summed to select the CUDA device index.

    Returns:
        (profiling_feature, gflops): z-score-normalized 1-D numpy array of
        profiling metrics, and the measured GFLOPS as a float.

    Raises:
        AssertionError: if no non-zero GFLOPS value is found on line index 5
            of the runtime log.
    """
    save_path = '../python_log_coo_twice'

    # NOTE(review): arguments are interpolated directly into a shell
    # command — safe only while sparse_name / parameters are trusted.
    runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size + base} ./coo_atom_binary_twice_timeout \
                        ../data/matrix_suite_tar/{sparse_name}/{sparse_name}  \
                         {block_num} {thread_num} {element_num} \
                        > {save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
    run_cmd(runtime_record)
    time.sleep(time_sleep)
    profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size + base} /usr/local/cuda-11.7/bin/ncu \
                        -s 4 -c 1  --set full  \
                        ./coo_atom_binary_profiling  ../data/matrix_suite_tar/{sparse_name}/{sparse_name} \
                         {block_num} {thread_num} {element_num}  \
                        > {save_path}/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
    run_cmd(profiling_txt)
    time.sleep(time_sleep)

    # ---- parse GFLOPS from the runtime log (fixed line index 5) ----
    # BUG FIX: the original did `f.close` without parentheses, so file
    # handles were never closed; `with` guarantees closure.
    gflops = 0
    with open(f'{save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log') as f:
        for line_id, line in enumerate(f):
            if line_id == 5:
                numbers = re.findall(r"\d+\.?\d*", line)
                gflops = float(numbers[1])  # second number on that line
    assert gflops != 0

    # ---- parse the ncu report into a flat feature vector ----
    # Rows beginning with '    ----' delimit metric tables; toggle a flag so
    # only rows inside a table contribute a feature.
    cache_pref_codes = {
        'cudaFuncCachePreferNone': 0.0,
        'cudaFuncCachePreferShared': 1.0,
        'cudaFuncCachePreferL1': 2.0,
        'cudaFuncCachePreferEqual': 3.0,
    }
    profiling_feature = []
    if_data_line = False
    with open(f'{save_path}/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log') as f:
        for line in f:
            if line[:8] == '    ----' and len(line.split(' ')) == 7:
                if_data_line = not if_data_line
                continue
            if if_data_line:
                # Last token on the row is the metric value; strip thousands
                # separators, then map enum names to ordinal codes.
                line_tmp = line.split()[-1].replace(",", "")
                if line_tmp in cache_pref_codes:
                    profiling_feature.append(cache_pref_codes[line_tmp])
                else:
                    profiling_feature.append(float(line_tmp))

    profiling_feature = np.array(profiling_feature).reshape((1, -1))
    profiling_feature = np.mean(profiling_feature, axis=0)  # 62 features
    # z-score normalization with dataset-wide stats from constant.py
    profiling_feature = (profiling_feature - mean_profiling) / (std_profiling + 1e-6)

    return profiling_feature, gflops




def next_step_back_mp_coo_large_scale_twice_timeout_noprofiling(sparse_name, block_num, thread_num, element_num, rank, world_size, base = 0):
    """Run (with on-disk caching) a timed COO SpMV execution and return the
    measured GFLOPS. The ncu profiling pass is intentionally omitted in this
    variant.

    The runtime binary is only invoked when its log file is missing under
    ``save_path``; otherwise the cached log is parsed directly.

    Args:
        sparse_name: matrix name under ../data/matrix_suite_tar/<name>/.
        block_num, thread_num, element_num: kernel launch parameters.
        rank, world_size, base: summed to select the CUDA device index.

    Returns:
        gflops: the measured GFLOPS as a float.

    Raises:
        AssertionError: if no non-zero GFLOPS value is found on line index 5
            of the runtime log.
    """
    save_path = '../python_log_coo_twice'

    runtime_record_name = f'{save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
    if not os.path.exists(runtime_record_name):
        # NOTE(review): arguments are interpolated directly into a shell
        # command — safe only while sparse_name / parameters are trusted.
        runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size + base} ./coo_atom_binary_twice_timeout  \
                            ../data/matrix_suite_tar/{sparse_name}/{sparse_name}  \
                            {block_num} {thread_num} {element_num} \
                            > {save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
        run_cmd(runtime_record)
        time.sleep(time_sleep)

    # ---- parse GFLOPS from the runtime log (fixed line index 5) ----
    # BUG FIX: the original did `f.close` without parentheses, so the file
    # handle was never closed; `with` guarantees closure. Dead triple-quoted
    # code blocks (disabled profiling path) were removed for clarity.
    gflops = 0
    with open(runtime_record_name) as f:
        for line_id, line in enumerate(f):
            if line_id == 5:
                numbers = re.findall(r"\d+\.?\d*", line)
                gflops = float(numbers[1])  # second number on that line
    assert gflops != 0

    return gflops
    
    


def next_step_back_stable_mp_coo_large_scale_twice_timeout_noprofiling(sparse_name, block_num, thread_num, element_num, rank, world_size, base = 0):
    """Uncached ("stable") variant: ALWAYS rerun the timed COO SpMV execution
    and return the measured GFLOPS. The ncu profiling pass is intentionally
    omitted in this variant.

    Unlike the cached noprofiling variant this does not check for an existing
    log file, so each call produces a fresh measurement.

    Args:
        sparse_name: matrix name under ../data/matrix_suite_tar/<name>/.
        block_num, thread_num, element_num: kernel launch parameters.
        rank, world_size, base: summed to select the CUDA device index.

    Returns:
        gflops: the measured GFLOPS as a float.

    Raises:
        AssertionError: if no non-zero GFLOPS value is found on line index 5
            of the runtime log.
    """
    save_path = '../python_log_coo_twice'

    # NOTE(review): arguments are interpolated directly into a shell
    # command — safe only while sparse_name / parameters are trusted.
    runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size + base} ./coo_atom_binary_twice_timeout \
                        ../data/matrix_suite_tar/{sparse_name}/{sparse_name}  \
                         {block_num} {thread_num} {element_num} \
                        > {save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
    run_cmd(runtime_record)
    time.sleep(time_sleep)

    # ---- parse GFLOPS from the runtime log (fixed line index 5) ----
    # BUG FIX: the original did `f.close` without parentheses, so the file
    # handle was never closed; `with` guarantees closure. Dead triple-quoted
    # code blocks (disabled profiling path) were removed for clarity.
    gflops = 0
    with open(f'{save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log') as f:
        for line_id, line in enumerate(f):
            if line_id == 5:
                numbers = re.findall(r"\d+\.?\d*", line)
                gflops = float(numbers[1])  # second number on that line
    assert gflops != 0

    return gflops





####### read sorted; needs element_num conversion from binary_sorted

def next_step_back_mp_coo_conversion_large_scale_twice_timeout(sparse_name, block_num, thread_num, element_num, rank, world_size, base = 0):
    """Run (with on-disk caching) a timed COO SpMV kernel and an ncu profiling
    pass for one (block_num, thread_num, element_num) configuration, then parse
    both logs.

    The runtime binary and ncu are only executed when their log file does not
    already exist under ``../python_log_coo_twice`` — existing logs are reused.

    Parameters:
        sparse_name: matrix name; selects ``../data/matrix_suite_tar/<name>/<name>``.
        block_num, thread_num, element_num: kernel launch configuration.
        rank, world_size, base: summed to pick the CUDA device
            (``CUDA_VISIBLE_DEVICES = rank + world_size + base``).

    Returns:
        (profiling_feature, gflops): z-normalized 1-D numpy feature vector
        parsed from the ncu report, and the GFLOPS value parsed from line 5
        of the runtime log (second number on that line).

    Raises:
        AssertionError: if no GFLOPS value was found (kernel failed/timed out).
    """
    save_path = '../python_log_coo_twice'

    runtime_record_name = f'{save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
    if not os.path.exists(runtime_record_name):
        # NOTE(review): the command is assembled by f-string interpolation and
        # run through a shell — only safe for trusted, locally generated names.
        runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size + base} ./coo_atom_binary_twice_timeout  \
                            ../data/matrix_suite_tar/{sparse_name}/{sparse_name}  \
                            {block_num} {thread_num} {element_num} \
                            > {save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
        run_cmd(runtime_record)
        time.sleep(time_sleep)  # give the GPU/filesystem time to settle between runs

    profiling_txt_name = f'{save_path}/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log'
    if not os.path.exists(profiling_txt_name):
        # ncu: skip 4 kernel launches, profile 1, collect the full metric set.
        profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size + base} /usr/local/cuda-11.7/bin/ncu \
                            -s 4 -c 1  --set full  \
                            ./coo_atom_binary_profiling  ../data/matrix_suite_tar/{sparse_name}/{sparse_name} \
                            {block_num} {thread_num} {element_num}  \
                            > {save_path}/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
        run_cmd(profiling_txt)
        time.sleep(time_sleep)

    ################### parse GFLOPS from the runtime log
    # Bug fix: the original used ``f.close`` (attribute access, never called),
    # leaking the file handle; ``with`` guarantees the file is closed.
    gflops = 0
    with open(runtime_record_name) as runtime_log:
        for line_id, line in enumerate(runtime_log):
            if line_id == 5:
                # second number on line 5 is the achieved GFLOPS
                numbers = re.findall(r"\d+\.?\d*", line)
                gflops = float(numbers[1])
    assert gflops != 0

    ################### parse the ncu metric table from the profiling log
    # ncu delimits each metric table with '    ----' rule lines; toggle
    # if_data_line so only rows between a rule pair are parsed.
    cache_pref_codes = {
        'cudaFuncCachePreferNone': 0.0,
        'cudaFuncCachePreferShared': 1.0,
        'cudaFuncCachePreferL1': 2.0,
        'cudaFuncCachePreferEqual': 3.0,
    }
    profiling_feature = []
    if_data_line = False
    with open(profiling_txt_name) as report_log:
        for line in report_log:
            if (line[:8] == '    ----' and len(line.split(' ')) == 7):
                if_data_line = not if_data_line
                continue
            if if_data_line:
                # last whitespace-separated token is the metric value;
                # strip thousands separators before conversion
                token = line.split()[-1].replace(",", "")
                if token in cache_pref_codes:
                    # categorical cache-preference setting -> numeric code
                    profiling_feature.append(cache_pref_codes[token])
                else:
                    profiling_feature.append(float(token))

    profiling_feature = np.array(profiling_feature).reshape((1, -1))
    profiling_feature = np.mean(profiling_feature, axis=0)  # 62 features
    # z-normalize with precomputed statistics; epsilon avoids division by zero
    profiling_feature = (profiling_feature - mean_profiling) / (std_profiling + 1e-6)

    return profiling_feature, gflops



def next_step_back_stable_mp_coo_conversion_large_scale_twice_timeout(sparse_name, block_num, thread_num, element_num, rank, world_size, base = 0):
    """Unconditionally rerun a timed COO SpMV kernel and an ncu profiling pass
    for one (block_num, thread_num, element_num) configuration, then parse
    both logs.

    Unlike the cached variant above, this always re-executes both commands
    (overwriting any existing logs) — a "stable" fresh measurement.

    Parameters:
        sparse_name: matrix name; selects ``../data/matrix_suite_tar/<name>/<name>``.
        block_num, thread_num, element_num: kernel launch configuration.
        rank, world_size, base: summed to pick the CUDA device
            (``CUDA_VISIBLE_DEVICES = rank + world_size + base``).

    Returns:
        (profiling_feature, gflops): z-normalized 1-D numpy feature vector
        parsed from the ncu report, and the GFLOPS value parsed from line 5
        of the runtime log (second number on that line).

    Raises:
        AssertionError: if no GFLOPS value was found (kernel failed/timed out).
    """
    save_path = '../python_log_coo_twice'

    # NOTE(review): commands are assembled by f-string interpolation and run
    # through a shell — only safe for trusted, locally generated names.
    runtime_record = f'CUDA_VISIBLE_DEVICES={rank + world_size + base} ./coo_atom_binary_twice_timeout \
                        ../data/matrix_suite_tar/{sparse_name}/{sparse_name}  \
                         {block_num} {thread_num} {element_num} \
                        > {save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
    run_cmd(runtime_record)
    time.sleep(time_sleep)  # give the GPU/filesystem time to settle between runs

    # ncu: skip 4 kernel launches, profile 1, collect the full metric set.
    profiling_txt = f'sudo CUDA_VISIBLE_DEVICES={rank + world_size + base} /usr/local/cuda-11.7/bin/ncu \
                        -s 4 -c 1  --set full  \
                        ./coo_atom_binary_profiling  ../data/matrix_suite_tar/{sparse_name}/{sparse_name} \
                         {block_num} {thread_num} {element_num}  \
                        > {save_path}/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log 2>&1'
    run_cmd(profiling_txt)
    time.sleep(time_sleep)

    ################### parse GFLOPS from the runtime log
    # Bug fix: the original used ``f.close`` (attribute access, never called),
    # leaking the file handle; ``with`` guarantees the file is closed.
    gflops = 0
    with open(f'{save_path}/runtime_{sparse_name}_{block_num}_{thread_num}_{element_num}.log') as runtime_log:
        for line_id, line in enumerate(runtime_log):
            if line_id == 5:
                # second number on line 5 is the achieved GFLOPS
                numbers = re.findall(r"\d+\.?\d*", line)
                gflops = float(numbers[1])
    assert gflops != 0

    ################### parse the ncu metric table from the profiling log
    # ncu delimits each metric table with '    ----' rule lines; toggle
    # if_data_line so only rows between a rule pair are parsed.
    cache_pref_codes = {
        'cudaFuncCachePreferNone': 0.0,
        'cudaFuncCachePreferShared': 1.0,
        'cudaFuncCachePreferL1': 2.0,
        'cudaFuncCachePreferEqual': 3.0,
    }
    profiling_feature = []
    if_data_line = False
    with open(f'{save_path}/report_{sparse_name}_{block_num}_{thread_num}_{element_num}.log') as report_log:
        for line in report_log:
            if (line[:8] == '    ----' and len(line.split(' ')) == 7):
                if_data_line = not if_data_line
                continue
            if if_data_line:
                # last whitespace-separated token is the metric value;
                # strip thousands separators before conversion
                token = line.split()[-1].replace(",", "")
                if token in cache_pref_codes:
                    # categorical cache-preference setting -> numeric code
                    profiling_feature.append(cache_pref_codes[token])
                else:
                    profiling_feature.append(float(token))

    profiling_feature = np.array(profiling_feature).reshape((1, -1))
    profiling_feature = np.mean(profiling_feature, axis=0)  # 62 features
    # z-normalize with precomputed statistics; epsilon avoids division by zero
    profiling_feature = (profiling_feature - mean_profiling) / (std_profiling + 1e-6)

    return profiling_feature, gflops

