# java -Xms14000m -Xmx140042m -Xmn700m -Xss16m -jar ./RankLib-2.16.jar -train ./rank_data/cmssw/cmssw_svm_rank_format_train_data.txt -test ./rank_data/cmssw/cmssw_svm_rank_format_test_data.txt -ranker 6 -layer 2 -norm zscore -shrinkage 0.0001 -tree 500 -tc -1 -leaf 100 -mls 10 -gmax 12  -metric2t NDCG@10 -metric2T NDCG@10 -lr 0.00001 -save ./rank_model/cmssw/cmssw_LambdaMART_model.txt
# java -jar ./RankLib-2.16.jar -load ./rank_model/cmssw/cmssw_LambdaMART_model.txt -rank ./rank_data/cmssw/cmssw_svm_rank_format_year_data.txt -gmax 12 -norm zscore -indri ./rank_model/cmssw/result/cmssw_result_LambdaMART.txt
# import pandas as pd
# test_sort_result_path="./rank_model/" + "cmssw" + "/result/" + "cmssw" + "_result" + "_" + "LambdaMART" + ".txt"
# df = pd.read_table(test_sort_result_path, sep=' ',
#                    header=None)
# col_index = 2
# df = df.sort_values(by=col_index)
# test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]
# df.to_excel(f'{test_sort_result_path}_sorted.xlsx', index=False)


# java -Xms14000m -Xmx140042m -Xmn700m -Xss16m -jar ./RankLib-2.16.jar -train ./rank_data/kubernetes/kubernetes_svm_rank_format_train_data.txt -test ./rank_data/kubernetes/kubernetes_svm_rank_format_test_data.txt -ranker 6 -layer 2 -norm zscore -shrinkage 0.0001 -tree 500 -tc -1 -leaf 100 -mls 10 -gmax 12  -metric2t NDCG@10 -metric2T NDCG@10 -lr 0.00001 -save ./rank_model/kubernetes/kubernetes_LambdaMART_model.txt
# java -jar ./RankLib-2.16.jar -load ./rank_model/kubernetes/kubernetes_LambdaMART_model.txt -rank ./rank_data/kubernetes/kubernetes_svm_rank_format_year_data.txt -gmax 12 -norm zscore -indri ./rank_model/kubernetes/result/kubernetes_result_LambdaMART.txt
# import pandas as pd
# test_sort_result_path="./rank_model/" + "kubernetes" + "/result/" + "kubernetes" + "_result" + "_" + "LambdaMART" + ".txt"
# df = pd.read_table(test_sort_result_path, sep=' ',
#                    header=None)
# col_index = 2
# df = df.sort_values(by=col_index)
# test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]
# df.to_excel(f'{test_sort_result_path}_sorted.xlsx', index=False)


# java -Xms14000m -Xmx140042m -Xmn700m -Xss16m -jar ./RankLib-2.16.jar -train ./rank_data/rust/rust_svm_rank_format_train_data.txt -test ./rank_data/rust/rust_svm_rank_format_test_data.txt -ranker 6 -layer 2 -norm zscore -shrinkage 0.0001 -tree 500 -tc -1 -leaf 100 -mls 10 -gmax 12  -metric2t NDCG@10 -metric2T NDCG@10 -lr 0.00001 -save ./rank_model/rust/rust_LambdaMART_model.txt
# java -jar ./RankLib-2.16.jar -load ./rank_model/rust/rust_LambdaMART_model.txt -rank ./rank_data/rust/rust_svm_rank_format_year_data.txt -gmax 12 -norm zscore -indri ./rank_model/rust/result/rust_result_LambdaMART.txt
# import pandas as pd
# test_sort_result_path="./rank_model/" + "rust" + "/result/" + "rust" + "_result" + "_" + "LambdaMART" + ".txt"
# df = pd.read_table(test_sort_result_path, sep=' ',
#                    header=None)
# col_index = 2
# df = df.sort_values(by=col_index)
# test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]
# df.to_excel(f'{test_sort_result_path}_sorted.xlsx', index=False)

# java -Xms14000m -Xmx140042m -Xmn700m -Xss16m -jar ./RankLib-2.16.jar -train ./rank_data/salt/salt_svm_rank_format_train_data.txt -test ./rank_data/salt/salt_svm_rank_format_test_data.txt -ranker 6 -layer 2 -norm zscore -shrinkage 0.0001 -tree 500 -tc -1 -leaf 100 -mls 10 -gmax 12  -metric2t NDCG@10 -metric2T NDCG@10 -lr 0.00001 -save ./rank_model/salt/salt_LambdaMART_model.txt
# java -jar ./RankLib-2.16.jar -load ./rank_model/salt/salt_LambdaMART_model.txt -rank ./rank_data/salt/salt_svm_rank_format_year_data.txt -gmax 12 -norm zscore -indri ./rank_model/salt/result/salt_result_LambdaMART.txt
# import pandas as pd
# test_sort_result_path="./rank_model/" + "salt" + "/result/" + "salt" + "_result" + "_" + "LambdaMART" + ".txt"
# df = pd.read_table(test_sort_result_path, sep=' ',
#                    header=None)
# col_index = 2
# df = df.sort_values(by=col_index)
# test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]
# df.to_excel(f'{test_sort_result_path}_sorted.xlsx', index=False)


# import pandas as pd
# test_sort_result_path="./rank_model/" + "cmssw" + "/result/" + "cmssw" + "_result" + "_" + "ListNet" + ".txt"
# df = pd.read_table(test_sort_result_path, sep=' ',
#                    header=None)
# col_index = 2
# df = df.sort_values(by=col_index)
# test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]
# df.to_excel(f'{test_sort_result_path}_sorted.xlsx', index=False)

# java -jar ./RankLib-2.16.jar -train ./rank_data/tensorflow/tensorflow_svm_rank_format_train_data.txt -test ./rank_data/tensorflow/tensorflow_svm_rank_format_test_data.txt -ranker 3  -max 20 -tolerance 0.002   -norm sum -gmax 12 -save ./rank_model/tensorflow/tensorflow_AdaRank_model.txt
# java -jar ./RankLib-2.16.jar -load ./rank_model/tensorflow/tensorflow_AdaRank_model.txt -rank ./rank_data/tensorflow/tensorflow_svm_rank_format_year_data.txt -gmax 12 -norm sum -indri ./rank_model/tensorflow/result/tensorflow_result_AdaRank.txt

# import pandas as pd
# test_sort_result_path="./rank_model/" + "tensorflow" + "/result/" + "tensorflow" + "_result" + "_" + "AdaRank" + ".txt"
# df = pd.read_table(test_sort_result_path, sep=' ',
#                    header=None)
# col_index = 2
# df = df.sort_values(by=col_index)
# test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]
# df.to_excel(f'{test_sort_result_path}_sorted.xlsx', index=False)
# #
# import pandas as pd
# test_sort_result_path="./rank_model/" + "tensorflow" + "/result/" + "tensorflow" + "_result" + "_" + "ListNet" + ".txt"
# df = pd.read_table(test_sort_result_path, sep=' ',
#                    header=None)
# col_index = 2
# df = df.sort_values(by=col_index)
# test_sort_result_path = test_sort_result_path[:test_sort_result_path.find('.txt')]
# df.to_excel(f'{test_sort_result_path}_sorted.xlsx', index=False)

# Task description:
# Read data from rank_model/{repo_name}/result/rank_eval/{repo_name}_sum_result.csv.
# Each CSV has the columns: 算法 (algorithm name), rear（false）——top（true） (True/False flag),
# ndcg, mrr, kendall_tau_distance.
# For each CSV, among rows where rear（false）——top（true） is True, find the algorithm with the
# largest ndcg, the algorithm with the largest mrr, and the algorithm with the smallest
# kendall_tau_distance.
# Do the same among rows where rear（false）——top（true） is False.
# Save the results to a new static.csv file with the columns:
# repo_name, true/false, best-ndcg algorithm, best-mrr algorithm, best-kendall_tau_distance algorithm.

# Suggested code to accomplish the above:
import pandas as pd
import os

repo_list = ["cmssw", "django", "kubernetes", "laravel", "moby", "opencv", "pandas", "rails", "react", "rust", "salt",
             "scikit-learn", "symfony", "tensorflow", "terraform", "yii2"]  # repositories to summarize

# Column names as they appear in the per-repo CSV files.
# NOTE: these are runtime data (actual CSV headers, with CJK characters) — do not "translate" them.
_ALGO_COL = '算法'  # algorithm name
_FLAG_COL = 'rear（false）——top（true）'  # True = "top" ranking, False = "rear" ranking


def _best_algorithm(data, flag, metric, smallest=False):
    """Return the algorithm name of the best row for one flag/metric pair.

    Args:
        data: DataFrame loaded from a {repo}_new_sum_result.csv file.
        flag: value of the flag column to filter on (True or False).
        metric: metric column to rank by (e.g. 'ndcg', 'mrr', 'Sum').
        smallest: if True pick the row with the minimum metric value
            (used for kendall_tau_distance); otherwise the maximum.

    Returns:
        The algorithm name, or None when no row matches the flag
        (the original code would raise ValueError from idxmax/idxmin here).
    """
    subset = data[data[_FLAG_COL] == flag]
    if subset.empty:
        return None
    best_idx = subset[metric].idxmin() if smallest else subset[metric].idxmax()
    return data.loc[best_idx, _ALGO_COL]


# One output row per (repo, flag); a "true" row is emitted before the "false" row.
output_data = {'repo_name': [], 'true/false': [], 'ndcg_max_algo': [], 'mrr_max_algo': [],
               'kendall_min_algo': [], 'sum_max_algo': []}

for repo_name in repo_list:
    file_path = f"rank_model/{repo_name}/result/rank_eval/{repo_name}_new_sum_result.csv"
    if not os.path.exists(file_path):
        continue  # silently skip repos with no evaluation results, as before
    data = pd.read_csv(file_path)

    for flag, label in ((True, 'true'), (False, 'false')):
        output_data['repo_name'].append(repo_name)
        output_data['true/false'].append(label)
        output_data['ndcg_max_algo'].append(_best_algorithm(data, flag, 'ndcg'))
        output_data['mrr_max_algo'].append(_best_algorithm(data, flag, 'mrr'))
        output_data['kendall_min_algo'].append(
            _best_algorithm(data, flag, 'kendall_tau_distance', smallest=True))
        output_data['sum_max_algo'].append(_best_algorithm(data, flag, 'Sum'))

# Persist the summary; one CSV for all repos.
output_df = pd.DataFrame(output_data)
output_df.to_csv('static.csv', index=False)
