---
language:
- en
tags:
- rick-and-morty
- llama
- roleplay
- character-ai
license: mit
---
# Rick Sanchez LLaMA Model
This is a fine-tuned version of Llama 3.2 optimized to respond like Rick Sanchez from *Rick and Morty*.
## Model Details
- Base Model: unsloth/Llama-3.2-3B-Instruct
- Fine-tuning: LoRA adaptation
- Training Data: Rick and Morty dialogue dataset
- Purpose: Character roleplay and interaction
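
If this repository hosts the LoRA weights as a PEFT adapter rather than merged weights, the adapter can be loaded on top of the base model with `peft`. A minimal sketch, assuming a standard PEFT adapter layout (if the repo already contains merged weights, the plain `from_pretrained` call in the Usage section is enough):

```python
# Minimal sketch: load the LoRA adapter on top of the base model.
# Assumes this repo hosts a standard PEFT adapter; skip this if the
# repo already contains merged weights.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "unsloth/Llama-3.2-3B-Instruct",
    torch_dtype=torch.float16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "CrimsonEyes/rick_sanchez_model")
tokenizer = AutoTokenizer.from_pretrained("unsloth/Llama-3.2-3B-Instruct")
```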
## Usage
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
def setup_rick_model(model_id, use_token=False):
    """
    Set up the Rick model from Hugging Face.

    model_id: "username/model-name" on Hugging Face
    use_token: set True if the repository is private
    """
    try:
        # For a private repository, log in with an access token first
        if use_token:
            from huggingface_hub import login
            token = "your_token_here"  # your Hugging Face token
            login(token)

        # Load model and tokenizer
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            device_map="auto"
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        return model, tokenizer
    except Exception as e:
        print(f"Error loading model: {e}")
        return None, None


def ask_rick(question, model, tokenizer, max_new_tokens=200):
    """Ask Rick a question."""
    # Rick's personality prompt
    role_play_prompt = (
        "You are Rick Sanchez, a brilliant mad scientist, "
        "the smartest man in the universe. Always respond as Rick would: "
        "sarcastic, genius, and indifferent."
    )

    # Format the input (the tokenizer adds the BOS token automatically)
    input_text = (
        f"### Instruction:\n{role_play_prompt}\n\n"
        f"### Input:\n{question}\n\n"
        f"### Response:\n"
    )

    # Generate a response; passing the attention mask avoids padding warnings
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        temperature=0.8,
        top_p=0.9,
        do_sample=True,
        repetition_penalty=1.2
    )

    # Decode and keep only the text after the response marker
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response.split("### Response:")[-1].strip()


# Usage example
if __name__ == "__main__":
    # Replace with your model's repository name
    MODEL_ID = "CrimsonEyes/rick_sanchez_model"

    # Load model
    model, tokenizer = setup_rick_model(MODEL_ID)

    if model and tokenizer:
        # Test questions
        questions = [
            "What do you think about space travel, Rick?",
            "Can you explain quantum physics to me?",
            "What's your opinion on family?"
        ]
        for question in questions:
            print(f"\nQuestion: {question}")
            response = ask_rick(question, model, tokenizer)
            print(f"Rick's response: {response}")
```
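The Alpaca-style `### Instruction` prompt above mirrors a common fine-tuning template. Since the base model is a Llama 3.2 Instruct checkpoint, the tokenizer's built-in chat template is an alternative way to build the prompt. A hedged sketch follows; which format works better depends on the format the adapter was actually trained on:

```python
# Alternative: build the prompt with the tokenizer's native Llama 3 chat
# template. Whether this beats the Alpaca-style template above depends on
# the prompt format used during fine-tuning.
messages = [
    {"role": "system", "content": "You are Rick Sanchez, the smartest man in the universe."},
    {"role": "user", "content": "What do you think about space travel, Rick?"},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(input_ids, max_new_tokens=200, do_sample=True, temperature=0.8)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```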
## For a Private Repository
```python
# First, get your token from https://huggingface.co/settings/tokens
from huggingface_hub import login
login("your_token_here")
MODEL_ID = "username/model-name" # Replace with your model's repository name
model, tokenizer = setup_rick_model(MODEL_ID, use_token=True)
```
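Rather than hardcoding the token in source, it can be read from an environment variable. A small sketch, assuming you have exported `HF_TOKEN` in your shell (huggingface_hub also picks up `HF_TOKEN` automatically, so an explicit `login()` is often unnecessary when it is set):

```python
import os
from huggingface_hub import login

# Read the access token from the environment instead of hardcoding it
login(token=os.environ["HF_TOKEN"])
```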
## Using the Model
```python
question = "What do you think about space travel, Rick?"
response = ask_rick(question, model, tokenizer)
print(f"Rick's response: {response}")
```
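For interactive use, tokens can be printed as they are generated instead of waiting for the full reply. A minimal sketch using `transformers.TextStreamer`, reusing the model and tokenizer loaded above with the same prompt format as `ask_rick`:

```python
from transformers import TextStreamer

# Stream tokens to stdout as they are generated
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
prompt = (
    "### Instruction:\nYou are Rick Sanchez.\n\n"
    "### Input:\nWhat's your opinion on family?\n\n"
    "### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
model.generate(**inputs, streamer=streamer, max_new_tokens=200, do_sample=True, temperature=0.8)
```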
## Limitations
- The model may generate responses that are sarcastic or irreverent
- Responses are styled after Rick's character and may not be suitable for all contexts