from data_selection import CachedHashedNgramDSIR
import os, json

# Default DSIR feature configuration: hashed 3-gram features built with the
# Qwen tokenizer, hashed into 300k buckets.
# 'name' / 'path' identify a prebuilt index on shared storage; only the inner
# 'config' dict is consumed by SimpleDataSelector below.
_DEFAULT_FEAT_CONFIG = {
    "name": "qwen_tokenizer_3_300000",
    "path": "/share/projset/dsir7/indexes/qwen_tokenizer_3_300000",
    "config": {
        "tokenizer": "/share/project/bowen/qwen_tokenizer",  # tokenizer path/name
        "ngrams": 3,           # n-gram order for hashed features
        "num_buckets": 300000  # number of hash buckets
    }
}

class SimpleDataSelector(object):
    """Thin wrapper around CachedHashedNgramDSIR.

    Supports three operations: building a cached n-gram index over raw
    datasets, computing importance weights of raw examples against target
    datasets, and resampling the top-weighted examples.
    """

    def __init__(self, feat_config=None):
        """Initialize from a feature-configuration dict.

        Args:
            feat_config: dict with a 'config' key holding 'tokenizer',
                'ngrams', 'num_buckets' and optionally 'num_proc'.
                Defaults to _DEFAULT_FEAT_CONFIG when omitted/None.
        """
        # BUG FIX: the default used to be the module-level dict itself — a
        # mutable default argument shared across every call. Use the
        # None-sentinel idiom instead (behavior for callers is unchanged).
        if feat_config is None:
            feat_config = _DEFAULT_FEAT_CONFIG
        # Fall back to the default inner config only when 'config' is absent
        # (lazy fallback, unlike the previous eager .get() default).
        self.config = (feat_config['config'] if 'config' in feat_config
                       else _DEFAULT_FEAT_CONFIG['config'])
        self.tokenizer = self.config['tokenizer']
        self.ngrams = self.config['ngrams']
        self.num_buckets = self.config['num_buckets']
        self.num_proc = self.config.get('num_proc', None)
        self.indexer = None   # created lazily by build_index()
        self.selector = None  # created lazily by compute_importance()

    def build_index(self, raw_datasets):
        """Fit (and cache) the bag-of-ngrams statistics over raw_datasets."""
        self.indexer = CachedHashedNgramDSIR(
            raw_datasets, None,
            tokenizer=self.tokenizer, ngrams=self.ngrams,
            num_buckets=self.num_buckets, num_proc=self.num_proc)
        self.indexer._cached_fit_bow(raw_datasets, num_tokens_to_fit=None)

    def compute_importance(self, raw_datasets, target_datasets, min_example_length=100):
        """Fit the importance estimator and compute per-example weights.

        Dataset lists are sorted so that repeated calls with the same sets
        (in any order) reuse the cached selector instead of rebuilding it.
        """
        raw_datasets = sorted(raw_datasets)
        target_datasets = sorted(target_datasets)
        if (self.selector
                and self.selector.raw_datasets == raw_datasets
                and self.selector.target_datasets == target_datasets
                and self.selector.min_example_length == min_example_length):
            print('REUSE SELECTOR')
        else:
            self.selector = CachedHashedNgramDSIR(
                raw_datasets, target_datasets,
                tokenizer=self.tokenizer, ngrams=self.ngrams,
                num_buckets=self.num_buckets, num_proc=self.num_proc,
                min_example_length=min_example_length)
        self.selector.fit_importance_estimator(num_tokens_to_fit='all')
        self.selector.cached_compute_importance_weights()

    def search(self, raw_datasets, target_datasets, num_to_sample, min_example_length=100):
        """Compute importance weights, then resample num_to_sample examples
        into the selector's cache directory."""
        self.compute_importance(raw_datasets, target_datasets, min_example_length)
        self.selector.resample(
            out_dir=self.selector.cache_dir / 'resampled',
            num_to_sample=num_to_sample)

def main():
    """CLI entry point for DSIR-based data selection.

    Modes (driven by which flags are given):
      * --raw_datasets + --target_datasets + --num_to_sample: full search
        (importance weights + resampling).
      * --raw_datasets + --target_datasets: compute importance weights only.
      * --raw_datasets alone: build the cached n-gram index.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Simple Data Selection with DSIR')
    parser.add_argument("--feat_config", type=str, default=None, help="the path to indexer configuration file")
    parser.add_argument("--raw_datasets", type=str, default=None, help="the list of raw dataset paths, separated by comma")
    parser.add_argument("--target_datasets", type=str, default=None, help="the list of target dataset paths, separated by comma")
    parser.add_argument("--min_example_length", type=int, default=100, help="min length of extracted examples")
    parser.add_argument("--num_to_sample", type=int, default=None, help="num_to_retrieve")

    args = parser.parse_args()
    print(args)

    feat_config = _DEFAULT_FEAT_CONFIG
    if args.feat_config:
        # BUG FIX: this used to test os.path.isdir(), which is never true for
        # a JSON config *file*, so --feat_config was silently ignored.
        if os.path.isfile(args.feat_config):
            # Context manager so the file handle is closed deterministically.
            with open(args.feat_config) as f:
                feat_config = json.load(f)

    data_selector = SimpleDataSelector(feat_config)

    if args.raw_datasets and args.target_datasets:
        raw_datasets = args.raw_datasets.split(',')
        target_datasets = args.target_datasets.split(',')
        if args.num_to_sample:
            data_selector.search(raw_datasets, target_datasets, args.num_to_sample, args.min_example_length)
        else:
            data_selector.compute_importance(raw_datasets, target_datasets, args.min_example_length)
    elif args.raw_datasets:
        raw_datasets = args.raw_datasets.split(',')
        data_selector.build_index(raw_datasets)


if __name__ == "__main__":
    main()

