negismohit123 committed
Commit be9fa36 • 1 Parent(s): 0668be2

Upload 4 files

Files changed (4)
  1. .gitignore +2 -0
  2. README.md +6 -9
  3. main.py +110 -0
  4. requirements.txt +2 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+.idea
+.venv
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
-title: GemmaLiBot
-emoji: 🐨
-colorFrom: indigo
-colorTo: red
+title: Gradio Google Gemma
+emoji: 🌖
+colorFrom: red
+colorTo: indigo
 sdk: gradio
-sdk_version: 4.19.2
-app_file: app.py
+sdk_version: 4.19.1
+app_file: main.py
 pinned: false
-license: mit
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
main.py ADDED
@@ -0,0 +1,110 @@
+import gradio as gr
+from huggingface_hub import InferenceClient
+import random
+
+# Gemma checkpoints selectable in the UI
+models = [
+    "google/gemma-7b",
+    "google/gemma-7b-it",
+    "google/gemma-2b",
+    "google/gemma-2b-it"
+]
+
+# One serverless Inference API client per model
+clients = []
+for model in models:
+    clients.append(InferenceClient(model))
+
+
+def format_prompt(message, history):
+    # Wrap the running conversation in Gemma's chat turn markers
+    prompt = ""
+    if history:
+        for user_prompt, bot_response in history:
+            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
+            prompt += f"<start_of_turn>model{bot_response}"
+    prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
+    return prompt
+
+
+def chat_inf(system_prompt, prompt, history, client_choice, seed, temp, tokens, top_p, rep_p):
+    # The dropdown is created with type='index', so client_choice is a 0-based index
+    client = clients[int(client_choice)]
+    if not history:
+        history = []
+        hist_len = 0
+    if history:
+        hist_len = len(history)
+        print(hist_len)
+
+    generate_kwargs = dict(
+        temperature=temp,
+        max_new_tokens=tokens,
+        top_p=top_p,
+        repetition_penalty=rep_p,
+        do_sample=True,
+        seed=seed,
+    )
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
+                                    return_full_text=False)
+    output = ""
+
+    # Stream tokens back to the Chatbot as they arrive
+    for response in stream:
+        output += response.token.text
+        yield [(prompt, output)]
+    history.append((prompt, output))
+    yield history
+
+
+def clear_fn():
+    return None
+
+
+rand_val = random.randint(1, 1111111111111111)
+
+
+def check_rand(inp, val):
+    # Re-randomize the seed when "Random Seed" is checked, otherwise keep the chosen value
+    if inp is True:
+        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
+    else:
+        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
+
+
+with gr.Blocks() as app:
+    gr.HTML(
+        """<center><h1 style='font-size:xx-large;'>Google Gemma Models</h1></center>""")
+    with gr.Group():
+        with gr.Row():
+            client_choice = gr.Dropdown(label="Models", type='index', choices=[c for c in models], value=models[0],
+                                        interactive=True)
+    chat_b = gr.Chatbot(height=500)
+    with gr.Group():
+        with gr.Row():
+            with gr.Column(scale=1):
+                with gr.Group():
+                    rand = gr.Checkbox(label="Random Seed", value=True)
+                    seed = gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, step=1, value=rand_val)
+                    tokens = gr.Slider(label="Max new tokens", value=6400, minimum=0, maximum=8000, step=64,
+                                       interactive=True, visible=True, info="The maximum number of tokens")
+            with gr.Column(scale=1):
+                with gr.Group():
+                    temp = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
+                    top_p = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
+                    rep_p = gr.Slider(label="Repetition Penalty", step=0.1, minimum=0.1, maximum=2.0, value=1.0)
+
+    with gr.Group():
+        with gr.Row():
+            with gr.Column(scale=3):
+                sys_inp = gr.Textbox(label="System Prompt (optional)")
+                inp = gr.Textbox(label="Prompt")
+                with gr.Row():
+                    btn = gr.Button("Chat")
+                    stop_btn = gr.Button("Stop")
+                    clear_btn = gr.Button("Clear")
+
+    # Re-seed, then stream the chat; the Stop button cancels both event chains
+    chat_sub = inp.submit(check_rand, [rand, seed], seed).then(chat_inf,
+                                                               [sys_inp, inp, chat_b, client_choice, seed, temp, tokens,
+                                                                top_p, rep_p], chat_b)
+    go = btn.click(check_rand, [rand, seed], seed).then(chat_inf,
+                                                        [sys_inp, inp, chat_b, client_choice, seed, temp, tokens, top_p,
+                                                         rep_p], chat_b)
+    stop_btn.click(None, None, None, cancels=[go, chat_sub])
+    clear_btn.click(clear_fn, None, [chat_b])
+app.queue(default_concurrency_limit=10).launch()
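
For reference, chat_inf() in main.py boils down to a streamed text_generation call against the serverless Inference API. The snippet below is a minimal sketch of that same call outside Gradio, not part of the commit: the model id, prompt text, and parameter values are illustrative, and it assumes the huggingface_hub version pinned in requirements.txt plus access to the gated Gemma endpoint (a token may be needed via HF_TOKEN or InferenceClient(token=...)).

    # Hedged sketch (not part of this commit): the streaming call chat_inf() makes,
    # without the Gradio UI. Prompt and parameter values here are examples only.
    from huggingface_hub import InferenceClient

    client = InferenceClient("google/gemma-7b-it")
    prompt = "<start_of_turn>userWhat is Gradio?<end_of_turn><start_of_turn>model"

    stream = client.text_generation(
        prompt,
        max_new_tokens=256,
        temperature=0.9,
        top_p=0.9,
        repetition_penalty=1.0,
        do_sample=True,
        seed=42,
        stream=True,
        details=True,
        return_full_text=False,
    )
    for response in stream:
        # Each streamed item carries one generated token
        print(response.token.text, end="", flush=True)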
requirements.txt ADDED
@@ -0,0 +1,2 @@
+gradio~=4.19.1
+huggingface_hub~=0.20.3
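
As a quick sanity check before running main.py locally, the pinned packages can be verified against these ~= constraints. This is a hedged sketch, not part of the commit; it only assumes both packages expose __version__.

    # Hedged sketch (not part of this commit): confirm installed versions match
    # the pins in requirements.txt before launching `python main.py`.
    import gradio
    import huggingface_hub

    print("gradio", gradio.__version__)                    # expected 4.19.x
    print("huggingface_hub", huggingface_hub.__version__)  # expected 0.20.x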