from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "CohereForAI/c4ai-command-r-v01-4bit"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# This checkpoint ships a bitsandbytes 4-bit quantization config, so calling
# .to(device) on it raises an error; let device_map handle placement instead.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

## <|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

async def get_answer_from_llm(question: str) -> str:
    # Format the message with the command-r chat template
    messages = [{"role": "user", "content": question}]
    input_ids = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)  # keep the prompt on the same device as the model
    gen_tokens = model.generate(
        input_ids,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.3,
    )
    # tokenizer.decode is a regular synchronous call, so it must not be awaited
    gen_text = tokenizer.decode(gen_tokens[0])
    return gen_text
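
# Example usage (a minimal sketch, assuming this runs as a standalone script):
# get_answer_from_llm is a coroutine, so it has to be driven by an event loop,
# e.g. with asyncio.run at the top level.
import asyncio

if __name__ == "__main__":
    answer = asyncio.run(get_answer_from_llm("Hello, how are you?"))
    print(answer)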