# Nexta-39-23

from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

## Load the base model

# Load the quantized base checkpoint; device_map="auto" lets accelerate
# place layers on the available device(s).
# NOTE(review): `load_in_4bit=True` requires bitsandbytes and is deprecated
# in recent transformers in favor of
# `quantization_config=BitsAndBytesConfig(load_in_4bit=True)` — confirm
# against the pinned transformers version.
base_model = AutoModelForCausalLM.from_pretrained(
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    load_in_4bit=True,
    device_map="auto",
)

## Load the NEXTa adapters

# Attach the NEXTa LoRA adapters on top of the quantized base model.
model = PeftModel.from_pretrained(base_model, "NEXTa-SA/Nexta-39-23")
# The tokenizer is loaded from the base checkpoint, not the adapter repo.
tokenizer = AutoTokenizer.from_pretrained(
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
)

## Example prompt structure

# Prompt template: one field per line. Replace the [bracketed] placeholders
# with campaign-specific values before generating.
prompt = """
Task: Create social media post
Language: English
Brand: [Brand name]
Audience: [Target audience]
Objective: [Campaign objective]
Tone: [Desired tone]
Additional context: [Any specific requirements]

Generate a social media post that:
"""

# Tokenize the prompt and move the tensors to the model's device.
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# Sampled decoding: temperature/top_p trade determinism for diversity.
outputs = model.generate(
    **inputs,
    max_new_tokens=300,
    temperature=0.7,
    top_p=0.9,
    do_sample=True,
)

# Decode the full generated sequence (prompt + completion) back to text.
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)