# NOTE: feel free to correct these categories; a "size" field should probably
# also be added per model (TODO — left out here rather than guessing values).
#
# Tuning legend:
#   🟢  Pre-trained
#   🔶  Fine-tuned on a task-specific dataset
#   💬  Chat model (RLHF, DPO, IFT, etc.)
#
# Maps a model identifier -> {"link": <model card / API docs URL>, "tuning": <legend emoji>}.
model_info = {
    "meta-llama-Meta-Llama-3-70B": {
        "link": "https://huggingface.co/meta-llama/Meta-Llama-3-70B",
        "tuning": "🟢",  # Pre-trained
    },
    "meta-llama-Meta-Llama-3-8B": {
        "link": "https://huggingface.co/meta-llama/Meta-Llama-3-8B",
        "tuning": "🟢",  # Pre-trained
    },
    "01-ai-Yi-1.5-34B": {
        "link": "https://huggingface.co/01-ai/Yi-1.5-34B",
        "tuning": "🔶",  # Fine-tuned on task specific dataset
    },
    "aaditya-Llama3-OpenBioLLM-70B": {
        "link": "https://huggingface.co/aaditya/Llama3-OpenBioLLM-70B",
        "tuning": "🔶",  # Fine-tuned on task specific dataset
    },
    "CohereForAI-aya-23-35B": {
        "link": "https://huggingface.co/CohereForAI/aya-23-35B",
        "tuning": "🔶",  # Fine-tuned on task specific dataset
    },
    "CohereForAI-c4ai-command-r-plus": {
        "link": "https://huggingface.co/CohereForAI/c4ai-command-r-plus",
        "tuning": "💬",  # Chat-model (RLHF, DPO, IFT, etc.)
    },
    "johnsnowlabs-JSL-MedLlama-3-8B-v9": {
        "link": "https://huggingface.co/johnsnowlabs/JSL-MedLlama-3-8B-v9",
        "tuning": "🔶",  # Fine-tuned on task specific dataset
    },
    "meta-llama-Llama-2-70B-hf": {
        "link": "https://huggingface.co/meta-llama/Llama-2-70B-hf",
        "tuning": "🟢",  # Pre-trained
    },
    "meta-llama-Llama-2-7b-hf": {
        "link": "https://huggingface.co/meta-llama/Llama-2-7b-hf",
        "tuning": "🟢",  # Pre-trained
    },
    "microsoft-phi-1_5": {
        "link": "https://huggingface.co/microsoft/phi-1_5",
        "tuning": "🟢",  # Pre-trained
    },
    "microsoft-phi-1": {
        "link": "https://huggingface.co/microsoft/phi-1",
        "tuning": "🟢",  # Pre-trained
    },
    "microsoft-phi-2": {
        "link": "https://huggingface.co/microsoft/phi-2",
        "tuning": "🟢",  # Pre-trained
    },
    "microsoft-Phi-3-medium-4k-instruct": {
        "link": "https://huggingface.co/microsoft/Phi-3-medium-4k-instruct",
        "tuning": "💬",  # Chat-model (RLHF, DPO, IFT, etc.)
    },
    "mistralai-Mistral-7B-v0.3": {
        "link": "https://huggingface.co/mistralai/Mistral-7B-v0.3",
        "tuning": "🟢",  # Pre-trained
    },
    "mistralai-Mixtral-8x22B-v0.1": {
        "link": "https://huggingface.co/mistralai/Mixtral-8x22B-v0.1",
        "tuning": "🟢",  # Pre-trained
    },
    "mistralai-Mixtral-8x7B-v0.1": {
        "link": "https://huggingface.co/mistralai/Mixtral-8x7B-v0.1",
        "tuning": "🟢",  # Pre-trained
    },
    "ProbeMedicalYonseiMAILab-medllama3-v20": {
        "link": "https://huggingface.co/ProbeMedicalYonseiMAILab/medllama3-v20",
        "tuning": "🔶",  # Fine-tuned on task specific dataset
    },
    "Qwen-Qwen2-72B": {
        "link": "https://huggingface.co/Qwen/Qwen2-72B",
        "tuning": "🟢",  # Pre-trained
    },
    "Qwen-Qwen2-7B": {
        "link": "https://huggingface.co/Qwen/Qwen2-7B",
        "tuning": "🟢",  # Pre-trained
    },
    # Proprietary API models below — all chat-tuned; links point at provider docs.
    "GPT-4": {
        "link": "https://platform.openai.com/docs/models",
        "tuning": "💬",
    },
    "GPT-4o": {
        "link": "https://platform.openai.com/docs/models",
        "tuning": "💬",
    },
    "GPT-3.5 Turbo": {
        "link": "https://platform.openai.com/docs/models",
        "tuning": "💬",
    },
    "Claude Opus": {
        "link": "https://www.anthropic.com/api",
        "tuning": "💬",
    },
    "Gemini 1.5 Pro": {
        "link": "https://ai.google.dev/",
        "tuning": "💬",
    },
    "Gemini Pro 1": {
        "link": "https://ai.google.dev/",
        "tuning": "💬",
    },
    "Gemini 1.5 Flash": {
        "link": "https://ai.google.dev/",
        "tuning": "💬",
    },
}