neuralleap committed
Commit b9c7951
1 Parent(s): 8739bcf

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -64,10 +64,10 @@ quant_config = BitsAndBytesConfig(
 )
 
 #config = PeftConfig.from_pretrained("physician-ai/mistral-finetuned1",use_auth_token=access_token)
-model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2",use_auth_token=access_token,quantization_config=quant_config,device_map="auto")
-model = PeftModel.from_pretrained(model, "physician-ai/mistral-finetuned1",use_auth_token=access_token)
-tokenizer = AutoTokenizer.from_pretrained("physician-ai/mistral-finetuned1",use_auth_token=access_token)
-text_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=1024, temperature=0.8, top_p=0.95, repetition_penalty=1.15)
+model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B",use_auth_token=access_token,quantization_config=quant_config,device_map="auto")
+model = PeftModel.from_pretrained(model, "physician-ai/llama3-8b-finetuned",use_auth_token=access_token,quantization_config=quant_config,device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("physician-ai/llama3-8b-finetuned",use_auth_token=access_token)
+text_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=4096, temperature=0.8, top_p=0.95, repetition_penalty=1.15)
 
 terminators = [
     tokenizer.eos_token_id,
@@ -120,7 +120,7 @@ chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterf
 with gr.Blocks(fill_height=True, css=css) as demo:
 
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Used Finetuned Mistral 7B Model", elem_id="duplicate-button")
+    gr.DuplicateButton(value="Finetuned LLAMA 3 8B Model", elem_id="duplicate-button")
     gr.ChatInterface(
         fn=chat_llama3_8b,
         chatbot=chatbot,
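
For reference, the new loading path reads roughly as below once pulled out of the diff. This is a minimal sketch, not the Space's exact code: the arguments of quant_config sit outside this hunk (a 4-bit NF4 setup is assumed here), access_token stands in for the Space's secret, and the PeftModel call is reduced to the arguments it strictly needs.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
from peft import PeftModel

access_token = "hf_..."  # placeholder; the Space reads its token from a secret

# Quantization config; the real arguments are outside this hunk, 4-bit NF4 is an assumption
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# Base model: Meta-Llama-3-8B, quantized and placed automatically across available devices
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B",
    use_auth_token=access_token,   # newer transformers versions prefer token=...
    quantization_config=quant_config,
    device_map="auto",
)

# Fine-tuned PEFT adapter applied on top of the quantized base model
model = PeftModel.from_pretrained(model, "physician-ai/llama3-8b-finetuned", use_auth_token=access_token)

# Tokenizer from the adapter repo, so any tokens added during fine-tuning match
tokenizer = AutoTokenizer.from_pretrained("physician-ai/llama3-8b-finetuned", use_auth_token=access_token)

# Generation pipeline with the commit's new 4096-token budget
text_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=4096,
    temperature=0.8,
    top_p=0.95,
    repetition_penalty=1.15,
)

The terminators list that follows in app.py (starting with tokenizer.eos_token_id) is untouched by this commit.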
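
The second hunk only relabels the duplicate button for the new model. The sketch below shows, under stated assumptions, how that UI block ties back to text_pipeline from the previous sketch: chat_llama3_8b, DESCRIPTION, PLACEHOLDER and css exist in app.py but are outside this diff, so their bodies here are illustrative stand-ins, and the Chatbot label truncated in the hunk header is completed as a guess.

import gradio as gr

DESCRIPTION = "Fine-tuned Llama 3 8B chat demo"   # assumed stand-in
PLACEHOLDER = "Ask a question..."                  # assumed stand-in
css = ""                                           # assumed stand-in

def chat_llama3_8b(message, history):
    # Illustrative chat function: flatten the tuple-style history into one prompt
    # and return the pipeline's continuation. The real app may instead apply a
    # chat template and pass the terminators list as stop token ids.
    prompt = ""
    for user_msg, bot_msg in history:
        prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
    prompt += f"User: {message}\nAssistant:"
    result = text_pipeline(prompt, return_full_text=False)
    return result[0]["generated_text"].strip()

chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label="Gradio ChatInterface")

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Finetuned LLAMA 3 8B Model", elem_id="duplicate-button")
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
    )

if __name__ == "__main__":
    demo.launch()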