# Load the HF_TOKEN from the .env file (required to download the gated Llama 3 weights)
from dotenv import load_dotenv
load_dotenv()

import transformers
import torch
from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM

# Load the Llama 3 8B Instruct model and tokenizer from the Hugging Face Hub
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side='left')

# Set up the pipeline
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1  # Use GPU if available
)
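# Note: pipeline() reuses the model object loaded above (moving it to the chosen
# device), so the bfloat16 weights are not loaded a second time.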

def chat_function(message, history, system_prompt, max_new_tokens, temperature):
    # Build the chat from the system prompt, the prior turns, and the new message
    messages = [{"role": "system", "content": system_prompt}]
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    prompt = pipeline.tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    # Llama 3 ends each assistant turn with <|eot_id|>, so stop on either token
    terminators = [
        pipeline.tokenizer.eos_token_id,
        pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
    ]
    outputs = pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        eos_token_id=terminators,
        do_sample=True,
        # Sampling is undefined at temperature=0, so clamp to a small positive floor
        temperature=max(temperature, 0.1),
        top_p=0.9,
    )
    # The pipeline returns prompt + completion as one string; strip the prompt
    return outputs[0]["generated_text"][len(prompt):]

message = "Hello, can you teach me past simple?"
history = [("Hi!", "I'm doing well, thanks for asking!")]
temperature = 0.7
max_new_tokens = 50
prompt = "Act as an english tutor. Always correct grammar and spelling mistakes. Always keep the conversation going by asking follow up questions"

response = chat_function(message=message, history=history, system_prompt=prompt, max_new_tokens=max_new_tokens, temperature=temperature)

print(response)
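
# The imports above include TextIteratorStreamer, which the script never uses.
# Below is a minimal sketch of a streaming variant of chat_function, assuming
# the same model and tokenizer: generation runs in a background thread while
# the streamer yields text as it is produced. The name chat_function_streaming
# and its body are illustrative, not part of the original script.
from threading import Thread

def chat_function_streaming(message, history, system_prompt, max_new_tokens, temperature):
    messages = [{"role": "system", "content": system_prompt}]
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    # apply_chat_template with return_tensors="pt" returns the token ids directly
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        eos_token_id=[
            tokenizer.eos_token_id,
            tokenizer.convert_tokens_to_ids("<|eot_id|>"),
        ],
        do_sample=True,
        temperature=max(temperature, 0.1),
        top_p=0.9,
    )
    Thread(target=model.generate, kwargs=generation_kwargs).start()

    # Yield the growing response as each chunk arrives, e.g. for a Gradio ChatInterface
    partial = ""
    for new_text in streamer:
        partial += new_text
        yield partial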