---
library_name: transformers
tags:
- art
datasets:
- gokaygokay/prompt_description_stable_diffusion_3k
language:
- en
pipeline_tag: text2text-generation
---

# Model Card

Fine-tuned [EleutherAI/pythia-410m](https://huggingface.co/EleutherAI/pythia-410m) on the [gokaygokay/prompt_description_stable_diffusion_3k](https://huggingface.co/datasets/gokaygokay/prompt_description_stable_diffusion_3k) dataset. Given a plain-language image description, the model generates a matching Stable Diffusion prompt.

### Direct Use

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gokaygokay/phytia410m_desctoprompt"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Your description
test_description = """
View to a rustic terrace filled with pots with autumn flowers and a vine
full of red leaves and bunches of grapes. In the foreground a wooden table
with a copious breakfast, coffee, bowls, vases and plates with fruits,
nuts, chestnuts, hazelnuts, breads and buns.
"""

prompt_template = """### Description:
{description}

### Prompt:
"""

text = prompt_template.format(description=test_description)


def inference(text, model, tokenizer, max_input_tokens=1000, max_output_tokens=200):
    # Tokenize the input, truncating overly long descriptions
    input_ids = tokenizer.encode(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=max_input_tokens,
    )

    # Generate; max_new_tokens bounds the generated prompt itself,
    # independent of the input length
    device = model.device
    generated_tokens_with_prompt = model.generate(
        input_ids=input_ids.to(device),
        max_new_tokens=max_output_tokens,
    )

    # Decode back to text
    generated_text_with_prompt = tokenizer.batch_decode(
        generated_tokens_with_prompt, skip_special_tokens=True
    )

    # Strip the input text, keeping only the generated continuation
    generated_text_answer = generated_text_with_prompt[0][len(text):]

    return generated_text_answer


print("Description input (test):", text)
print("Fine-tuned model's prompt:")
print(inference(text, model, tokenizer))
```
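
For quick experiments, the same model can also be run through the `transformers` text-generation pipeline. This is a minimal sketch assuming the prompt template from the example above; the sampling settings and the example description are illustrative, not tuned values:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="gokaygokay/phytia410m_desctoprompt")

# Illustrative input; any plain-language image description works
description = "A cozy cabin in a snowy forest at dusk, warm light in the windows."
text = "### Description:\n{}\n\n### Prompt:\n".format(description)

# return_full_text=False drops the input text from the output;
# temperature/top_p are illustrative sampling values, not tuned defaults
result = generator(
    text,
    max_new_tokens=200,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    return_full_text=False,
)
print(result[0]["generated_text"])
```

Sampling (`do_sample=True`) yields more varied prompts across runs; leave it off for the deterministic greedy output shown in the first example.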