models = [
    # MUST USE CHAT (RLHF) MODELS FROM HUB WITH INFERENCE AVAILABLE
    "google/gemma-2b-it",
    "google/gemma-7b-it",
    "HuggingFaceH4/zephyr-7b-beta",
    "microsoft/phi-2",
    "M4-ai/TinyMistral-6x248M",
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "openai-community/gpt2-xl",
    "bigscience/bloom",
    "stabilityai/stablelm-2-zephyr-1_6b",
    "stabilityai/stablelm-zephyr-3b",
    "huggingtweets/porns_xx",
    # "meta-llama/Llama-2-70b-hf",
    "ura-hcmut/GemSUra-2B",
]
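
# A minimal sketch (not part of the original list) of how these model IDs might be
# queried through huggingface_hub's InferenceClient, given the note that the models
# must be chat models with serverless inference available. The prompt, max_tokens
# value, and error handling below are illustrative assumptions.
from huggingface_hub import InferenceClient

client = InferenceClient()  # assumes an HF token is available in the environment

for model_id in models:
    try:
        response = client.chat_completion(
            messages=[{"role": "user", "content": "Say hello in one sentence."}],
            model=model_id,
            max_tokens=64,
        )
        print(model_id, "->", response.choices[0].message.content)
    except Exception as err:
        # Models without a deployed inference endpoint (or non-chat models) will fail here.
        print(model_id, "-> unavailable:", err)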