import os

import torch
import gradio as gr
from transformers import AutoTokenizer, MistralForCausalLM
from peft import PeftModel, PeftConfig
  
  
# Base model and PEFT adapter repository IDs
base_model_id = "mistralai/Mistral-7B-v0.1"
adapter_id = "Tonic/mistralmed"

# Load the tokenizer from the base model. Mistral's tokenizer ships without a
# pad token, so reuse EOS; left padding keeps prompts aligned for decoder-only
# generation (it only matters for batched inputs, but is harmless here).
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = 'left'

# Read the Hugging Face access token from the environment; never hardcode
# tokens in source files, since that leaks the credential to anyone who can
# read the file.
hf_token = os.getenv("HF_TOKEN")

# Load the adapter config, then the base model it was trained on, then attach
# the PEFT adapter weights on top.
peft_config = PeftConfig.from_pretrained(adapter_id, token=hf_token)
peft_model = MistralForCausalLM.from_pretrained(peft_config.base_model_name_or_path, trust_remote_code=True)
peft_model = PeftModel.from_pretrained(peft_model, adapter_id, token=hf_token)
peft_model.eval()
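
# Optional (an assumption that the adapter is LoRA-style; peft_config.peft_type
# confirms this at runtime): the adapter weights could be merged into the base
# model for faster inference, at the cost of no longer being detachable:
# peft_model = peft_model.merge_and_unload()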

class ChatBot:
    def __init__(self):
        # Token ids of the running conversation; None until the first turn
        self.history = None

    def predict(self, user_input):
        # Encode the user input, appending EOS as a turn separator
        # (`user_input` avoids shadowing the built-in `input`)
        user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")

        # Concatenate the user input with the chat history. Test against None:
        # `if self.history:` on a multi-element tensor raises a RuntimeError.
        if self.history is not None:
            chat_history_ids = torch.cat([self.history, user_input_ids], dim=-1)
        else:
            chat_history_ids = user_input_ids

        # Generate a response with the PEFT model. max_new_tokens bounds only
        # the reply, so a growing history cannot exhaust the length budget the
        # way a fixed max_length would.
        with torch.no_grad():
            response = peft_model.generate(
                input_ids=chat_history_ids,
                max_new_tokens=256,
                pad_token_id=tokenizer.eos_token_id,
            )

        # Keep the full sequence (history + new reply) for the next turn
        self.history = response

        # Decode only the newly generated tokens, not the whole conversation
        response_text = tokenizer.decode(response[0][chat_history_ids.shape[-1]:], skip_special_tokens=True)
        return response_text
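
# Standalone usage sketch, bypassing the web UI (hypothetical prompt):
#   bot = ChatBot()
#   print(bot.predict("What are the first-line treatments for hypertension?"))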

bot = ChatBot()
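
# Note: this single ChatBot instance is shared by every visitor, so concurrent
# users would interleave each other's chat history. Per-user state would need
# something like gr.State or gr.ChatInterface instead.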

title = "👋🏻Welcome to Tonic's MistralMed Chat🚀"
description = "You can use this Space to test out the current model (MistralMed) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on Discord to build together."
examples = [["What is the boiling point of nitrogen?"]]

iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
    inputs="text",
    outputs="text",
    theme="ParityError/Anime"
)

iface.launch()
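
# Local run notes (assumptions about the setup outside a Hugging Face Space):
# - Export HF_TOKEN (the environment variable read above) with read access to
#   the adapter repo before launching:
#     HF_TOKEN=hf_... python app.py
# - launch(share=True) would additionally serve a temporary public URL.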