File size: 6,312 Bytes
0e1636e d36edb4 7a714ac 0e1636e e05d5a8 0e1636e b166a40 0e1636e b166a40 0e1636e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 |
import random
class FetchModel:
    """Static registry of public model ids.

    Exposes the user-facing catalog (``all_models``) and resolves a public
    model id to a concrete backend model identifier (``select_model``).
    """

    # Public catalog shown to clients: stable id, display name,
    # human-readable description, and modality ("text" or "image").
    _CATALOG = [
        {
            "id": "llama-4-maverick-17b",
            "name": "LLaMA 4 Maverick 17B",
            "description": "Meta AI's 17B-parameter general-purpose model from the LLaMA 4 series, designed for high-quality text generation.",
            "type": "text",
        },
        {
            "id": "llama-4-scout-17b",
            "name": "LLaMA 4 Scout 17B",
            "description": "Instruction-tuned version of LLaMA 4 by Meta, tailored for alignment and structured task performance.",
            "type": "text",
        },
        {
            "id": "llama-3.1-8b",
            "name": "LLaMA 3.1 8B",
            "description": "A fast and lightweight 8B parameter model from Meta's LLaMA 3.1 line, optimized for low-latency inference.",
            "type": "text",
        },
        {
            "id": "llama-3.3-70b",
            "name": "LLaMA 3.3 70B",
            "description": "Meta's 70B parameter flagship model from LLaMA 3.3, designed for state-of-the-art language understanding and generation.",
            "type": "text",
        },
        {
            "id": "deepseek-r1",
            "name": "DeepSeek R1",
            "description": "DeepSeek AI's foundational model focused on reasoning, language understanding, and long-context comprehension.",
            "type": "text",
        },
        {
            "id": "deepseek-v3",
            "name": "DeepSeek V3",
            "description": "DeepSeek AI's third-generation model with enhanced reasoning and coding abilities.",
            "type": "text",
        },
        {
            "id": "qwen-2.5-72b",
            "name": "Qwen 2.5 72B",
            "description": "Large instruction-tuned language model from Qwen 2.5 family, optimized for complex NLP tasks.",
            "type": "text",
        },
        {
            "id": "gemma-2-27b",
            "name": "Gemma 2 27B",
            "description": "Google's instruction-tuned model with 27B parameters, capable of high-performance natural language understanding.",
            "type": "text",
        },
        {
            "id": "grok-3",
            "name": "Grok 3",
            "description": "xAI's general-purpose large language model designed for reasoning, conversation, and alignment.",
            "type": "text",
        },
        {
            "id": "grok-3-fast",
            "name": "Grok 3 (Fast)",
            "description": "A low-latency version of Grok 3 optimized for responsiveness and quick task execution.",
            "type": "text",
        },
        {
            "id": "grok-3-mini",
            "name": "Grok 3 Mini",
            "description": "A smaller variant of Grok 3 designed for lighter inference while maintaining core capabilities.",
            "type": "text",
        },
        {
            "id": "grok-3-mini-fast",
            "name": "Grok 3 Mini (Fast)",
            "description": "Fast and lightweight variant of Grok 3 Mini for extremely low-latency use cases.",
            "type": "text",
        },
        {
            "id": "grok-2-1212",
            "name": "Grok 2 1212",
            "description": "An earlier generation Grok model from xAI, optimized for general language tasks with improved efficiency.",
            "type": "text",
        },
        {
            "id": "fal-ai/fast-sdxl",
            "name": "Fast SDXL",
            "description": "A fast and efficient image generation model from the SDXL family, optimized for high-quality outputs.",
            "type": "image",
        },
    ]

    # Maps a public model id to the candidate backend identifiers it may
    # resolve to; select_model() picks one uniformly at random.
    # NOTE: the previous if/elif chain listed the 70B Turbo backend twice
    # for "llama-3.3-70b", accidentally biasing the random choice — the
    # duplicate has been removed.
    _BACKENDS = {
        "llama-4-maverick-17b": ["meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"],
        "llama-4-scout-17b": [
            "meta-llama/Llama-4-Scout-17B-16E-Instruct",
            "meta-llama/llama-4-scout-17b-16e-instruct",
        ],
        "llama-3.1-8b": ["llama-3.1-8b-instant"],
        "llama-3.3-70b": [
            "meta-llama/Llama-3.3-70B-Instruct-Turbo",
            "llama-3.3-70b-versatile",
        ],
        "deepseek-r1": [
            "deepseek-ai/DeepSeek-R1",
            "deepseek-r1-distill-llama-70b",
            "deepseek-ai/DeepSeek-R1-Turbo",
            "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        ],
        "deepseek-v3": ["deepseek-ai/DeepSeek-V3"],
        "qwen-2.5-72b": [
            "Qwen/Qwen2.5-VL-72B-Instruct",
            "Qwen/Qwen2.5-72B-Instruct",
        ],
        "gemma-2-27b": ["google/gemma-2-27b-it"],
        "grok-3": ["grok-3"],
        "grok-3-fast": ["grok-3-fast"],
        "grok-3-mini": ["grok-3-mini"],
        "grok-3-mini-fast": ["grok-3-mini-fast"],
        "grok-2-1212": ["grok-2-1212"],
    }

    @staticmethod
    def all_models():
        """Return the public model catalog.

        Returns:
            list[dict]: fresh copies of each catalog entry (keys: ``id``,
            ``name``, ``description``, ``type``), so callers may mutate
            the result without affecting later calls.
        """
        return [dict(entry) for entry in FetchModel._CATALOG]

    @staticmethod
    def select_model(id):
        """Resolve a public model id to a concrete backend model name.

        Args:
            id: public model id as listed in ``all_models()``.
            (Parameter keeps its original name for keyword-call
            compatibility, despite shadowing the builtin.)

        Returns:
            str | None: one backend identifier chosen uniformly at random
            from the candidates for ``id``, or ``None`` for unknown ids
            (including image models with no backend mapping, e.g.
            ``"fal-ai/fast-sdxl"``) — matching the original fall-through
            behavior.
        """
        options = FetchModel._BACKENDS.get(id)
        if options is None:
            return None
        return random.choice(options)
|