File size: 1,320 Bytes
fc90836
 
 
 
 
8780c33
fc90836
 
 
 
 
 
8780c33
0d2c713
 
8780c33
0d2c713
8780c33
 
0d2c713
 
8780c33
0d2c713
8780c33
0d2c713
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# Install runtime dependencies at process start. This is a common pattern on
# hosted demo platforms (e.g. Hugging Face Spaces) where the image cannot
# always pin packages ahead of time.
# NOTE(review): shelling out to pip at import time is slow and fragile --
# prefer a requirements.txt where the platform supports it.
import os
os.system('pip install transformers==4.31.0 accelerate tiktoken einops')
# os.system('pip install "modelscope" --upgrade -f https://pypi.org/project/modelscope/')
# os.system('pip install transformers_stream_generator')

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

# Load the Qwen-7B tokenizer and model from the Hugging Face Hub (downloads
# the checkpoint on first run). trust_remote_code=True executes Python code
# shipped inside the checkpoint repo -- required by Qwen, acceptable only
# because the repo is trusted.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)
# device_map="auto" lets accelerate spread the weights across available
# devices; .eval() switches off dropout for inference.
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True).eval()
# Attach the checkpoint's own generation defaults (sampling settings, stop
# token ids) so model.generate() behaves as the model authors intended.
model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)

def generate(prompt, max_new_tokens=150):
    """Generate a text continuation of *prompt* with the Qwen-7B model.

    Args:
        prompt: Input text to continue.
        max_new_tokens: Upper bound on newly generated tokens. Defaults to
            150, matching the original hard-coded behavior.

    Returns:
        The decoded text (prompt plus continuation) as a single string.
    """
    batch = tokenizer(prompt, return_tensors="pt")
    # With device_map="auto" the weights may live on an accelerator while
    # the tokenizer output is on CPU, so move the inputs to the model's
    # device. Also forward the attention mask -- dropping it makes the
    # model attend to padding and triggers transformers warnings.
    input_ids = batch["input_ids"].to(model.device)
    attention_mask = batch["attention_mask"].to(model.device)
    generated_ids = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_new_tokens=max_new_tokens,
    )
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]

# Wire the generator into a minimal Gradio interface and start the server.
prompt_box = gr.Textbox(label="Input your Creative Thinking", value="Creative Thinking")
result_box = gr.Textbox(label="Prompt")
sample_prompts = [["Creative Thinking"], ["Inspiration"]]
demo = gr.Interface(
    generate,
    inputs=prompt_box,
    outputs=result_box,
    examples=sample_prompts,
    title="👨🏻‍🎤 Power For your Inspirations 👨🏻‍🎤",
    description="",
)
demo.launch()