from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the pre-trained Gemma 7B model and its tokenizer
# (gemma-7b is a gated model: you must accept its license on the Hugging Face
# Hub and authenticate, e.g. via `huggingface-cli login`, before downloading)
model_name = "google/gemma-7b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
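
# Optional variant (an assumption, not in the original file): gemma-7b is
# large, so on a GPU machine you may prefer loading in bfloat16 and letting
# Accelerate place the weights (requires `pip install accelerate`):
#
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.bfloat16, device_map="auto"
#   )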

# Generate a continuation of the given prompt
def generate_response(prompt):
    # Tokenize the prompt; calling the tokenizer directly also returns the
    # attention mask, which model.generate expects
    inputs = tokenizer(prompt, return_tensors="pt")

    # Sample a response; do_sample=True is required for temperature to have
    # any effect, and max_new_tokens bounds only the generated tokens rather
    # than prompt plus generation
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=50,
            num_return_sequences=1,
            do_sample=True,
            temperature=0.9,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the generated token IDs back into text
    response = tokenizer.decode(output[0], skip_special_tokens=True)

    return response

# Dict-in/dict-out wrapper exposing the chatbot to a Hugging Face Space
def spaces_chatbot(input_dict):
    prompt = input_dict["text"]
    response = generate_response(prompt)
    return {"response": response}

# Sample input
sample_input = {"text": "Hello, how are you?"}

# Test the function
print(spaces_chatbot(sample_input))
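
# A minimal sketch of serving spaces_chatbot through Gradio, the usual UI
# layer for Hugging Face Spaces. This wiring is an assumption, not part of
# the original file; it requires `pip install gradio`.
import gradio as gr

def gradio_chat(text):
    # Adapt Gradio's plain-text input to the dict interface above
    return spaces_chatbot({"text": text})["response"]

demo = gr.Interface(fn=gradio_chat, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()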