Model Trained Using AutoTrain

This model was trained using AutoTrain. For more information, please visit the AutoTrain documentation.

Usage


from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "JiazhenLiu01/falcon-test2"
tokenizer = AutoTokenizer.from_pretrained(model_path)

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map={"": 0},          # place the whole model on GPU 0
    torch_dtype="auto",          # use the dtype stored in the checkpoint
    # offload_folder="offload",  # uncomment to offload weights if GPU memory is tight
).eval()

input_text = "### Human: And both [redacted] and you are keen for next year?### Assistant:"

# Encode the input text into tokens and move them to the model's device
input_ids = tokenizer.encode(input_text, return_tensors="pt")
input_ids = input_ids.to(model.device)

# Use the fine-tuned model for dialogue inference
output = model.generate(input_ids, max_length=200, repetition_penalty=2.0)

# Decode the model output tokens into text
output_text = tokenizer.decode(output[0], skip_special_tokens=True)

print("Original generated response:", output_text)

# Find the position of the "### Assistant:" marker
assistant_index = output_text.find("### Assistant:")

# Extract the response after "### Assistant:", trimming to the last complete sentence
assistant_response = output_text[assistant_index + len("### Assistant:"):]
last_period = assistant_response.rfind(".")
if last_period != -1:
    assistant_response = assistant_response[:last_period + 1]

print("Generated response:", assistant_response)
