basakerdogan committed on
Commit
b467c4f
β€’
1 Parent(s): ef70fb3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -74
app.py CHANGED
@@ -1,79 +1,20 @@
1
- from huggingface_hub import InferenceClient
2
  import gradio as gr
 
3
 
4
- client = InferenceClient("basakerdogan/Cyber-Jarvis")
 
5
 
6
- def format_prompt(message, history, system_prompt=None):
7
- prompt = "<s>"
8
- for user_prompt, bot_response in history:
9
- prompt += f"[INST] {user_prompt} [/INST]"
10
- prompt += f" {bot_response}</s> "
11
- if system_prompt:
12
- prompt += f"[SYS] {system_prompt} [/SYS]"
13
- prompt += f"[INST] {message} [/INST]"
14
- return prompt
15
 
16
- def generate(
17
- prompt, history, system_prompt=None, temperature=0.2, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,
18
- ):
19
- temperature = float(temperature)
20
- if temperature < 1e-2:
21
- temperature = 1e-2
22
- top_p = float(top_p)
23
 
24
- generate_kwargs = dict(
25
- temperature=temperature,
26
- max_new_tokens=max_new_tokens,
27
- top_p=top_p,
28
- repetition_penalty=repetition_penalty,
29
- do_sample=True,
30
- seed=42,
31
- )
32
-
33
- formatted_prompt = format_prompt(prompt, history, system_prompt)
34
-
35
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
36
- output = ""
37
-
38
- for response in stream:
39
- output += response.token.text
40
- yield output
41
- return output
42
-
43
- mychatbot = gr.Chatbot(
44
- avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
45
-
46
- demo = gr.ChatInterface(
47
- fn=generate,
48
- chatbot=mychatbot,
49
- title="Hello! I'm a AI Chatbot by Exnrt.πŸ‘‹ How can I help you today?",
50
- css="body { background-color: inherit; overflow-x:hidden;}"
51
- ":root {--color-accent: transparent !important; --color-accent-soft:transparent !important; --code-background-fill:black !important; --body-text-color:white !important;}"
52
- "#component-2 {background:#ffffff1a; display:contents;}"
53
- "div#component-0 { height: auto !important;}"
54
- ".gradio-container.gradio-container-4-8-0.svelte-1kyws56.app {max-width: 100% !important;}"
55
- "gradio-app {background: linear-gradient(134deg,#00425e 0%,#001a3f 43%,#421438 77%) !important; background-attachment: fixed !important; background-position: top;}"
56
- ".panel.svelte-vt1mxs {background: transparent; padding:0;}"
57
- ".block.svelte-90oupt { background: transparent; border-color: transparent;}"
58
- ".bot.svelte-12dsd9j.svelte-12dsd9j.svelte-12dsd9j { background: #ffffff1a; border-color: transparent; color: white;}"
59
- ".user.svelte-12dsd9j.svelte-12dsd9j.svelte-12dsd9j { background: #ffffff1a; border-color: transparent; color: white; padding: 10px 18px;}"
60
- "div.svelte-iyf88w{ background: #cc98d445; border-color: transparent; border-radius: 25px;}"
61
- "textarea.scroll-hide.svelte-1f354aw { background: transparent; color: #fff !important;}"
62
- ".primary.svelte-cmf5ev { background: transparent; color: white;}"
63
- ".primary.svelte-cmf5ev:hover { background: transparent; color: white;}"
64
- "button#component-8 { display: none; position: absolute; margin-top: 60px; border-radius: 25px;}"
65
- "div#component-9 { max-width: fit-content; margin-left: auto; margin-right: auto;}"
66
- "button#component-10, button#component-11, button#component-12 { flex: none; background: #ffffff1a; border: none; color: white; margin-right: auto; margin-left: auto; border-radius: 9px; min-width: fit-content;}"
67
- ".share-button.svelte-12dsd9j { display: none;}"
68
- "footer.svelte-mpyp5e { display: none !important;}"
69
- ".message-buttons-bubble.svelte-12dsd9j.svelte-12dsd9j.svelte-12dsd9j { border-color: #31546E; background: #31546E;}"
70
- ".bubble-wrap.svelte-12dsd9j.svelte-12dsd9j.svelte-12dsd9j {padding: 0;}"
71
- ".prose h1 { color: white !important; font-size: 16px !important; font-weight: normal !important; background: #ffffff1a; padding: 20px; border-radius: 20px; width: 90%; margin-left: auto !important; margin-right: auto !important;}"
72
- ".toast-wrap.svelte-pu0yf1 { display:none !important;}"
73
- ".scroll-hide { scrollbar-width: auto !important;}"
74
- ".main svelte-1kyws56 {max-width: 800px; align-self: center;}"
75
- "div#component-4 {max-width: 650px; margin-left: auto; margin-right: auto;}"
76
- "body::-webkit-scrollbar { display: none;}"
77
- )
78
-
79
- demo.queue().launch(show_api=False)
 
1
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
2
  import gradio as gr
3
+ import spaces
4
 
5
# Load the tokenizer and model once at import time so every request reuses
# the same instances instead of re-loading them per call.
tokenizer = AutoTokenizer.from_pretrained("basakerdogan/Cyber-Jarvis")
# from_tf=True: the Hub checkpoint ships TensorFlow weights, converted to
# PyTorch at load time.
model = AutoModelForSeq2SeqLM.from_pretrained("basakerdogan/Cyber-Jarvis", from_tf=True)
7
 
8
@spaces.GPU
def generate(prompt):
    """Generate text from *prompt* with the Cyber-Jarvis seq2seq model.

    Args:
        prompt: Input text (e.g. a persona such as "photographer").

    Returns:
        The first decoded output sequence, with special tokens stripped.
    """
    batch = tokenizer(prompt, return_tensors="pt")
    # Fix: forward the attention mask alongside the input ids. The original
    # passed only batch["input_ids"], discarding batch["attention_mask"],
    # which makes transformers warn and can let the model attend over
    # padding tokens.
    generated_ids = model.generate(
        batch["input_ids"],
        attention_mask=batch["attention_mask"],
        max_new_tokens=150,
    )
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]
 
 
 
15
 
16
# Wire up and launch the Gradio UI: one persona textbox in, one generated
# prompt textbox out, backed by generate().
persona_box = gr.Textbox(label="Input a persona, e.g. photographer", value="photographer")
prompt_box = gr.Textbox(label="Prompt")
demo_description = "This app generates ChatGPT prompts, it's based on a BART model trained on [this dataset](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts). 📓 Simply enter a persona that you want the prompt to be generated based on. 🧙🏻🧑🏻‍🚀🧑🏻‍🎨🧑🏻‍🔬🧑🏻‍💻🧑🏼‍🏫🧑🏽‍🌾"
gr.Interface(
    generate,
    inputs=persona_box,
    outputs=prompt_box,
    examples=[["photographer"], ["developer"]],
    title="👨🏻‍🎤 ChatGPT Prompt Generator 👨🏻‍🎤",
    description=demo_description,
).launch()