Nathan Habib committed
Commit 5ced480
Parent(s): bdabf34
adds script to create a request file for any model
scripts/create_request_file.py  ADDED  (+104, -0)
@@ -0,0 +1,104 @@
from datetime import datetime, timezone
import json
import os
import re
import click
from huggingface_hub import HfApi, snapshot_download
from colorama import Fore
import pprint

EVAL_REQUESTS_PATH = "eval-queue"
QUEUE_REPO = "open-llm-leaderboard/requests"

precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
weight_types = ("Original", "Delta", "Adapter")


def get_model_size(model_info, precision: str):
    size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
    try:
        # Prefer the exact parameter count reported in the safetensors metadata (converted to billions)
        model_size = round(model_info.safetensors["total"] / 1e9, 3)
    except AttributeError:
        try:
            # Fall back to parsing a size hint (e.g. "7b", "350m") out of the model id
            size_match = re.search(size_pattern, model_info.modelId.lower())
            model_size = size_match.group(0)
            model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
        except AttributeError:
            return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py

    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
    model_size = size_factor * model_size
    return model_size


def main():
    api = HfApi()
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset")

    model_name = click.prompt("Enter model name")
    revision = click.prompt("Enter revision", default="main")
    precision = click.prompt("Enter precision", default="float16", type=click.Choice(precisions))
    model_type = click.prompt("Enter model type", type=click.Choice(model_types))
    weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types))
    base_model = click.prompt("Enter base model", default="")
    status = click.prompt("Enter status", default="FINISHED")

    try:
        model_info = api.model_info(repo_id=model_name, revision=revision)
    except Exception as e:
        print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}")
        return 1

    model_size = get_model_size(model_info=model_info, precision=precision)

    try:
        license = model_info.cardData["license"]
    except Exception:
        license = "?"

    eval_entry = {
        "model": model_name,
        "base_model": base_model,
        "revision": revision,
        "private": False,
        "precision": precision,
        "weight_type": weight_type,
        "status": status,
        "submitted_time": current_time,
        "model_type": model_type,
        "likes": model_info.likes,
        "params": model_size,
        "license": license,
    }

    user_name = ""
    model_path = model_name
    if "/" in model_name:
        user_name = model_name.split("/")[0]
        model_path = model_name.split("/")[1]

    pprint.pprint(eval_entry)

    if click.confirm("Do you want to continue? This request file will be pushed to the hub"):
        click.echo("continuing...")

        out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}"
        os.makedirs(out_dir, exist_ok=True)
        out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json"

        with open(out_path, "w") as f:
            f.write(json.dumps(eval_entry))

        api.upload_file(
            path_or_fileobj=out_path,
            path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1],
            repo_id=QUEUE_REPO,
            repo_type="dataset",
            commit_message=f"Add {model_name} to eval queue",
        )
    else:
        click.echo("aborting...")


if __name__ == '__main__':
    main()
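For context, the script is run interactively (for example, python scripts/create_request_file.py from the repository root): it downloads a snapshot of open-llm-leaderboard/requests, collects each field via click prompts, writes the request JSON locally, and then uploads it back to the queue repo. The sketch below only illustrates the shape of the request file the script produces; the model name, timestamp, and field values are made-up examples, not real leaderboard data.

# Illustration only: assumed example values, mirroring the eval_entry built in main().
import json

example_entry = {
    "model": "example-org/example-7b",         # hypothetical model name
    "base_model": "",
    "revision": "main",
    "private": False,
    "precision": "float16",
    "weight_type": "Original",
    "status": "FINISHED",
    "submitted_time": "2023-09-01T00:00:00Z",  # placeholder timestamp
    "model_type": "pretrained",
    "likes": 0,
    "params": 7.0,
    "license": "?",
}

# Naming scheme used in main():
# eval-queue/<user>/<model>_eval_request_False_<precision>_<weight_type>.json
example_path = "eval-queue/example-org/example-7b_eval_request_False_float16_Original.json"

print(example_path)
print(json.dumps(example_entry, indent=2))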
