---
license: apache-2.0
---

# BioinspiredMixtral: Large Language Model for the Mechanics of Biological and Bio-Inspired Materials using Mixture-of-Experts

To accelerate discovery and guide insights, we report an open-source autoregressive transformer large language model (LLM) trained on expert knowledge in the field of biological materials, with a particular focus on mechanics and structural properties.

The model is fine-tuned on a corpus of more than a thousand peer-reviewed articles on structural biological and bio-inspired materials, and can be prompted to recall information, assist with research tasks, and serve as an engine for creativity.


This model is based on the work reported in https://doi.org/10.1002/advs.202306724, but focuses on the development of a mixture-of-experts strategy.

The model is a fine-tuned version of [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1).

To load the model with llama-cpp-python from a local GGUF file:

```python
from llama_cpp import Llama

# Path to the downloaded GGUF file
model_path = 'lamm-mit/BioinspiredMixtral/ggml-model-q5_K_M.gguf'
chat_format = "mistral-instruct"

llm = Llama(model_path=model_path,
            n_gpu_layers=-1,  # offload all layers to the GPU
            verbose=True,
            n_ctx=10000,
            #main_gpu=0,
            chat_format=chat_format,
            #split_mode=llama_cpp.LLAMA_SPLIT_LAYER
            )
```
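If the GGUF file is not yet present locally, it can be fetched first with `huggingface_hub` (a minimal sketch; the repo id and filename are taken from the snippet above):

```python
from huggingface_hub import hf_hub_download

# Downloads the quantized weights and returns the local cache path
model_path = hf_hub_download(repo_id='lamm-mit/BioinspiredMixtral',
                             filename='ggml-model-q5_K_M.gguf')
```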

Alternatively, download and load the model in one step directly from the Hugging Face Hub:

```python
from llama_cpp import Llama

repo_id = 'lamm-mit/BioinspiredMixtral'  # Hugging Face repo id (not a local file path)
chat_format = "mistral-instruct"

llm = Llama.from_pretrained(
    repo_id=repo_id,
    filename="*q5_K_M.gguf",  # glob pattern matching the quantized GGUF file
    verbose=True,
    n_gpu_layers=-1,
    n_ctx=10000,
    #main_gpu=0,
    chat_format=chat_format,
)
```
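With either loading path, responses can then be generated through the llama-cpp-python chat-completion interface (a minimal sketch; the prompts are illustrative):

```python
response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant familiar with biological and bioinspired materials."},
        {"role": "user", "content": "What is spider silk in the context of bioinspired materials?"},
    ],
    temperature=0.7,
    max_tokens=512,
)
print(response["choices"][0]["message"]["content"])
```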

Alternatively, inference can run through the Hugging Face Transformers API using the helper functions below.
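A minimal loading sketch first (this assumes Transformers-format weights are available under the same repo id; point `model_name` at your own checkpoint otherwise):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: Transformers-format weights exist under this repo id;
# substitute your own checkpoint if the repository only hosts GGUF files.
model_name = 'lamm-mit/BioinspiredMixtral'

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name,
                                             torch_dtype=torch.bfloat16,  # reduce memory footprint of the 8x7B model
                                             device_map="auto")           # spread layers across available devices
```

The `generate_response` helper wraps `model.generate` with common sampling options: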

```python
import torch

def generate_response(model, tokenizer, text_input="Biology offers amazing possibilities, especially for",
                      num_return_sequences=1,
                      temperature=1.,  # the higher the temperature, the more creative the model becomes
                      max_new_tokens=127,
                      num_beams=1,
                      top_k=50,
                      top_p=0.9, repetition_penalty=1., eos_token_id=2, verbatim=False,
                      exponential_decay_length_penalty_fac=None, add_special_tokens=True,
                      ):
    # Tokenize the prompt and move it to the model's device
    inputs = tokenizer(text_input, add_special_tokens=add_special_tokens, return_tensors='pt').to(model.device)

    with torch.no_grad():
        outputs = model.generate(input_ids=inputs["input_ids"],
                                 attention_mask=inputs["attention_mask"],
                                 max_new_tokens=max_new_tokens,
                                 temperature=temperature,  # modulates the next-token probabilities
                                 num_beams=num_beams,
                                 top_k=top_k,
                                 top_p=top_p,
                                 num_return_sequences=num_return_sequences,
                                 eos_token_id=eos_token_id,
                                 pad_token_id=eos_token_id,
                                 do_sample=True,
                                 repetition_penalty=repetition_penalty,
                                 )

    # Decode only the newly generated tokens, stripping the prompt
    return tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:].detach().cpu().numpy(),
                                  skip_special_tokens=True)
```
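For example, with `model` and `tokenizer` loaded as above (the prompt is illustrative):

```python
output = generate_response(model, tokenizer,
                           text_input="Collagen is a hierarchical material because",
                           max_new_tokens=64, temperature=0.7)
print(output[0])
```

The `generate_BioMixtral` wrapper below builds a chat-formatted prompt before calling this helper: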

```python
def generate_BioMixtral(system_prompt='You are a helpful assistant. You are familiar with materials science, especially biological and bioinspired materials.',
                        prompt='What is spider silk in the context of bioinspired materials?',
                        repetition_penalty=1.,
                        top_p=0.9, top_k=256,
                        temperature=0.5, max_tokens=512, verbatim=False, eos_token=None,
                        prepend_response='',
                        ):
    if eos_token is None:
        eos_token = tokenizer.eos_token_id

    # Build the message list, with an optional system prompt
    if system_prompt is None:
        messages = [
            {"role": "user", "content": prompt},
        ]
    else:
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ]

    # Apply the chat template and optionally seed the start of the response
    txt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    txt = txt + prepend_response

    output_text = generate_response(model, tokenizer, text_input=txt, eos_token_id=eos_token,
                                    num_return_sequences=1, repetition_penalty=repetition_penalty,
                                    top_p=top_p, top_k=top_k,
                                    temperature=temperature, max_new_tokens=max_tokens, verbatim=verbatim,
                                    )
    return output_text[0]
```

Example use, including a simple throughput measurement:

```python
import time

start_time = time.time()
result = generate_BioMixtral(system_prompt='You respond accurately.',
                             prompt="What is graphene? Answer with detail.",
                             max_tokens=512, temperature=0.7)

print(result)
deltat = time.time() - start_time
print("--- %s seconds ---" % deltat)

# Estimate generation throughput from the token count of the response
toked = tokenizer(result)
print("Tokens per second (generation): ", len(toked['input_ids']) / deltat)
```