import os
from collections import defaultdict


def stat_size(fnames):
    """Count the total number of lines (examples) across a list of .jsonl files."""
    cnt = 0
    for fname in fnames:
        with open(fname, 'r') as f:
            for line in f:
                cnt += 1
    return cnt


# Group candidate files as dataset -> source -> [file paths].
dataset2fname = defaultdict(lambda: defaultdict(list))
final_task_files = []
srcs = [
    'bge-m3',
    'medi',
    'mteb-Classification',
    'mteb-Clustering',
    'mteb-PairClassification',
    'mteb-Reranking',
    'mteb-Retrieval',
    'mteb-Retrieval_aug',
    'mteb-STS',
]

# Collect .jsonl files from every source; for MTEB sources keep only the
# 'default' and English splits.
for src in srcs:
    src_path = f'/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/{src}'
    for fname in os.listdir(src_path):
        if not fname.endswith('.jsonl'):
            continue
        _, dataset, lang = fname[:-6].split('_')
        if src.startswith('mteb') and lang != 'default' and not lang.startswith('en'):
            continue
        # if lang == 'default' or lang.startswith('en'):
        fname = os.path.join(src_path, fname)
        dataset2fname[dataset][src].append(fname)

# When a dataset appears in more than one source, keep only the source with
# the largest number of examples.
for dataset, item in dataset2fname.items():
    if len(item) == 1:
        fnames = item[list(item.keys())[0]]
        final_task_files.extend(fnames)
    else:
        max_size = -1
        max_size_src = None
        for src, fnames in item.items():
            size = stat_size(fnames)
            if size > max_size:
                max_size = size
                max_size_src = src
        fnames = item[max_size_src]
        final_task_files.extend(fnames)

# with open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/task_files.txt', 'w') as f:
#     for task_file in final_task_files:
#         if 'Classification' in task_file:
#             line = f'{task_file}\tclassification'
#         else:
#             line = f'{task_file}\tdefault'
#         f.write(line + '\n')

# from tqdm import tqdm
# cnt = 0
# for task_file in tqdm(final_task_files):
#     with open(task_file, 'r') as f:
#         for line in f:
#             cnt += 1

# fnames = []
# with open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/task_files.txt', 'r') as f:
#     for line in f:
#         fname, task_type = line.strip().split('\t')
#         fnames.append(fname)

# Write the selected task files, one per line. task_type is derived from the
# file name but is not written in the current output format; -1 is a
# placeholder in the last column.
with open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/p+a_aug_en_task_files.txt', 'w') as f:
    for fname in final_task_files:
        if 'classification' in fname.lower():
            task_type = 'classification'
        elif 'medi_task' in fname.lower():
            task_type = 'super-NI'
        elif 'clustering' in fname.lower():
            task_type = 'clustering'
        elif 'sts' in fname.lower():
            task_type = 'sts'
        else:
            task_type = 'default'
        # f.write(f'{fname}\t{task_type}\t-1\n')
        f.write(f'{fname}\t-1\n')

# def stat_size(fnames):
#     cnt = 0
#     for fname in fnames:
#         with open(fname, 'r') as f:
#             for line in f:
#                 cnt += 1
#     return cnt

# f_in = open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/p+a_task_files.txt', 'r')
# f_out = open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/p+a_task_files_en.txt', 'w')
# for line in f_in:
#     file, type_, size = line.strip().split('\t')
#     lang = file[:-6].split('_')[-1]
#     if lang == 'default' or lang.startswith('en'):
#         f_out.write(line)
# f_in.close()
# f_out.close()
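
# Optional sanity check (not part of the original script; a sketch of how one
# might verify the output): read the generated list back and report how many
# task files were selected.
out_path = '/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/p+a_aug_en_task_files.txt'
with open(out_path, 'r') as f:
    kept = [line.strip().split('\t')[0] for line in f]
print(f'{len(kept)} task files selected')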