type (stringclasses, 1 value) | id (stringlengths, 5–122) | num_branches (int64, 1–1.76k) | branches (sequencelengths, 1–1.76k) | main_branch_size (int64, 0–32,943B) |
---|---|---|---|---|
model | mbrad/Stop_Signs | 1 | ["main"] | 1,519 |
model | HengZ121/ms-marco-MiniLM-L6V2-rewritten | 1 | ["main"] | 91,818,533 |
model | SourAsslips/Daddy | 1 | ["main"] | 1,446,035,724 |
model | SourAsslips/Gavin | 1 | ["main"] | 1,446,034,874 |
model | SourAsslips/Jessica | 1 | ["main"] | 1,446,034,877 |
model | Grayx/stable_jp3_12121 | 1 | ["main"] | 3,295,853,098 |
model | SourAsslips/Linda | 1 | ["main"] | 1,446,032,393 |
model | sujithvemi/whisper-medium-physician-dictation-gpt-4-turbo | 1 | ["main"] | 3,057,826,894 |
model | benjamin/zett-hypernetwork-TinyLlama-1.1B-intermediate-step-1431k-3T | 1 | ["main"] | 1,365,514,966 |
model | sujithvemi/whisper-large-physician-dictation-gpt-4-turbo | 1 | ["main"] | 1,519 |
model | kumarme072/model_med_195_E | 1 | ["main"] | 682,581,471 |
model | LightXXXXX/llama-2-7b-light | 1 | ["main"] | 13,479,328,407 |
model | Luca-Engel/finetuned_text_class | 1 | ["main"] | 268,786,071 |
model | hmandsager/detr-resnet-50_finetuned_cppe5 | 1 | ["main"] | 166,508,788 |
model | bcjeong/Solar_ko_alpaca | 1 | ["main"] | 3,634,908 |
model | davelotito/donut_experiment_3 | 1 | ["main"] | 809,125,362 |
model | dbaek111/Llama-2-7b-chat-hf-Elon_1000 | 1 | ["main"] | 4,829,132,333 |
model | K1sven/Zerik-2 | 1 | ["main"] | 1,426,671,845 |
model | benjamin/zett-hypernetwork-Mistral-7B-v0.1 | 1 | ["main"] | 5,423,889,272 |
model | miibanl/ModeloTextosEconomicos | 1 | ["main"] | 132,190,232 |
model | AbrahamKlb/speaker_emb | 1 | ["main"] | 4,802 |
model | kunwarshesh/Test-copy-for-mbart_ru_sum_gazeta | 1 | ["main"] | 1,543 |
model | mjrdbds/llama3-4b-classifierunsloth-130524 | 1 | ["main"] | 16,069,720,105 |
model | NikolayKozloff/llama3_8b_chat_brainstorm-Q6_K-GGUF | 1 | ["main"] | 6,596,008,783 |
model | LightEmbed/sbert-all-MiniLM-L6-v2-onnx | 1 | ["main"] | 91,435,409 |
model | phamngocbao/qa | 1 | ["main"] | 1,519 |
model | Rodr16020/GNS3_Python_Code_Llama-2-Chat-Seele-v_2 | 1 | ["main"] | 26,034,194,577 |
model | benjamin/zett-hypernetwork-multilingual-Mistral-7B-v0.1 | 1 | ["main"] | 5,424,741,729 |
model | sanchit42/Mistral-7b-4bit-finetune_2 | 1 | ["main"] | 14,485,819,779 |
model | Seanxh/gemma-2b-flock-1715619914 | 1 | ["main"] | 5,029,943,906 |
model | BeegolAI/lora_model_llama-3_beegol_8bit-q8 | 1 | ["main"] | 8,540,772,420 |
model | phamngocbao/qa_model | 1 | ["main"] | 1,519 |
model | cheir/treaty | 1 | ["main"] | 76,359,907 |
model | ddn0116/detr-resnet-50_finetuned_cppe5 | 1 | ["main"] | 166,537,886 |
model | Warik21/gemma-1.1-2b-VF-finetune | 1 | ["main"] | 1,545 |
model | timm/vit_wee_patch16_reg1_gap_256.sbb_in1k | 1 | ["main"] | 107,485,335 |
model | terry69/mistral-poe-10p-detach-full | 1 | ["main"] | 14,485,819,285 |
model | quirky-lats-at-mats/trained_wmdp_lat | 1 | ["main"] | 226,543,325 |
model | curious60/berk | 1 | ["main"] | 1,519 |
model | kyl23/hw3_RTE_bitfit_1e-5 | 1 | ["main"] | 498,620,256 |
model | ruslanmv/ai-medical-model-32bit | 1 | ["main"] | 42,665,103,882 |
model | nrishabh/llama3-8b-instruct-qlora-medium | 1 | ["main"] | 227,284,601 |
model | jfranklin-foundry/foundry_llama_flock_task11715620296 | 1 | ["main"] | 13,479,240,239 |
model | DataIntelligenceTeam/NER-gemma-7b-bnb-4bit | 1 | ["main"] | 221,876,066 |
model | samaprint/test | 1 | ["main"] | 89,299,016 |
model | nbeerbower/llama3-KawaiiMahouSauce-8B | 1 | ["main"] | 16,069,718,585 |
model | GAWON0619/HallymChatbotDaara | 1 | ["main"] | 438,075,130 |
model | cobrakenji/granite-8b-code-instruct-Q4_K_M-GGUF | 1 | ["main"] | 4,882,861,598 |
model | Facundo-DiazPWT/PWST | 1 | ["main"] | 1,546 |
model | AdibZaboli/whisper-small-hi | 1 | ["main"] | 1,519 |
model | kaloopsiia/jinx_arcane | 1 | ["main"] | 1,519 |
model | jfranklin-foundry/foundry_llama_flock_task11715620894 | 1 | ["main"] | 13,479,240,239 |
model | ludocomito/Minerva-MoE-2x3B | 1 | ["main"] | 10,195,823,227 |
model | elenorina/OBC-Orgs-hold-day-long-conference-in-Anantnag-eb-updated | 1 | ["main"] | 1,519 |
model | jfranklin-foundry/foundry_llama_flock_task11715621106 | 1 | ["main"] | 13,479,240,239 |
model | aeolian83/Llama-3-Open-Ko-8B-aeolian83-chatvec | 1 | ["main"] | 16,069,721,451 |
model | niranjanramarajar/Llama-3-Tamil-v0-5 | 1 | ["main"] | 16,069,724,193 |
model | abhishek/autotrain-w77ed-kah7g | 1 | ["main"] | 968,924,065 |
model | CarlosJefte/llama-3-8b-bnb-4bit | 1 | ["main"] | 16,237,622,652 |
model | Miiquel/M | 1 | ["main"] | 1,519 |
model | Litzy619/PHI30512HMAB1 | 1 | ["main"] | 7,844,300,062 |
model | decentmakeover13/distilbert-imdb | 1 | ["main"] | 268,822,430 |
model | GenTrendGPT/Model-LLM.TypeGEN | 1 | ["main"] | 1,519 |
model | jfranklin-foundry/foundry_llama_flock_task11715621570 | 1 | ["main"] | 13,479,240,239 |
model | spygaurad/whisper-small-hi | 1 | ["main"] | 1,519 |
model | davelotito/donut_experiment_4 | 1 | ["main"] | 1,519 |
model | LoneStriker/falcon-11B-GGUF | 1 | ["main"] | 41,843,829,864 |
model | Rebecca19990101/WestSeverus-7B-DPO-v2-ORPO | 1 | ["main"] | 14,485,853,502 |
model | FrankML/AI-Cookbook-Tuned-Models | 1 | ["main"] | 4,265,251,976 |
model | JaspervanLeuven/controlnetLarge | 1 | ["main"] | 23,123,626,566 |
model | russplusplus/bumble-private-detector-js | 1 | ["main"] | 211,379,589 |
model | sylviam00/output | 1 | ["main"] | 155,138,213,636 |
model | jenishygirl/uiioo | 1 | ["main"] | 1,519 |
model | DL-Project/hatespeech_distilbert | 1 | ["main"] | 268,786,990 |
model | jonathanjordan21/outputs | 1 | ["main"] | 22,376,719 |
model | supremexoppai/oklinFia1 | 1 | ["main"] | 37,865,775 |
model | jonathanjordan21/TinyLlama-kompres | 1 | ["main"] | 22,219,967 |
model | EthanRhys/SA-55 | 1 | ["main"] | 67,660,111 |
model | afrideva/llama3_8b_chat_brainstorm-GGUF | 1 | ["main"] | 49,057,438,790 |
model | lukebhan/NOPPredictorFeedback | 1 | ["main"] | 273,672 |
model | sulph/archive | 1 | ["main"] | 42,795,221,632 |
model | Jumane555/Rr | 1 | ["main"] | 1,550 |
model | afrideva/pip-code-bandit-GGUF | 1 | ["main"] | 8,437,095,532 |
model | sanchit-gandhi/parler-tts-mini-v0.1-expresso-combined | 1 | ["main"] | 33,797,816,173 |
model | ludocomito/Minerva-MoE-3x3B | 1 | ["main"] | 14,600,027,988 |
model | davelotito/donut_experiment_5 | 1 | ["main"] | 814,526,771 |
model | LinxuanPastel/osdejoaquiarribaeltiktok | 1 | ["main"] | 89,473,959 |
model | b2online/teste_produtct | 1 | ["main"] | 1,519 |
model | dtorber/BioNLP-conditional-prompting-encoder-eLife | 1 | ["main"] | 651,020,239 |
model | sanchit-gandhi/parler-tts-mini-v0.1-expresso-concatenated | 1 | ["main"] | 33,865,784,045 |
model | PantagrueLLM/jargon-general-biomed | 1 | ["main"] | 583,585,330 |
model | DL-Project/hatespeech_ast | 1 | ["main"] | 345,345,393 |
model | Q-I-Phoenix-I-Q/the-feet1 | 1 | ["main"] | 456,490,947 |
model | piotr25691/llama-3-cat-8b-instruct-v1-gguf | 2 | ["imatrix", "main"] | 77,603,052,226 |
model | ungonzal/tweet_eval | 1 | ["main"] | 148,084,198 |
model | AlignmentResearch/robust_llm_pythia-31m_niki-053_enronspam_gcg_seed-2 | 31 | ["adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main"] | 1,519 |
model | BeegolAI/lora_model_llama-3_beegol_q4_k_m-gguf | 1 | ["main"] | 315,136,074 |
model | AlignmentResearch/robust_llm_pythia-31m_niki-053_enronspam_gcg_seed-1 | 31 | ["adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main"] | 1,519 |
model | mipn/alune | 1 | ["main"] | 1,541 |
model | Doqso/whisper-small-hi | 1 | ["main"] | 1,519 |
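
The table does not state how its rows were collected. As a rough illustration of what each column holds, the sketch below (an assumption about the data's meaning, not the dataset's actual pipeline) shows how a record of the same shape could be derived for one repository with the `huggingface_hub` client; the repo id is taken from the first row purely as an example, and `main_branch_size` is interpreted here as the total size in bytes of the files on the `main` revision.

```python
# Minimal sketch (assumption, not the pipeline used to build this table):
# derive one record -- branches, branch count, and main-branch size --
# for a single model repo via the huggingface_hub client.
from huggingface_hub import HfApi

api = HfApi()
repo_id = "mbrad/Stop_Signs"  # example id taken from the first table row

# "branches" / "num_branches": every git branch the repo exposes.
refs = api.list_repo_refs(repo_id, repo_type="model")
branches = [ref.name for ref in refs.branches]

# "main_branch_size": sum of file sizes on the main revision
# (interpretation; sizes are only populated with files_metadata=True).
info = api.model_info(repo_id, revision="main", files_metadata=True)
main_branch_size = sum(f.size or 0 for f in info.siblings)

record = {
    "type": "model",
    "id": repo_id,
    "num_branches": len(branches),
    "branches": branches,
    "main_branch_size": main_branch_size,
}
print(record)
```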