rashmi committed on
Commit
0ca0182
β€’
1 Parent(s): c20a54f
Files changed (2) hide show
  1. app copy.py +43 -0
  2. app.py +36 -8
app copy.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import spaces
3
+
4
+ import os
5
+ import gc
6
+ import random
7
+ import warnings
8
+
9
+ warnings.filterwarnings("ignore")
10
+
11
+ import numpy as np
12
+ import pandas as pd
13
+
14
+ pd.set_option("display.max_rows", 500)
15
+ pd.set_option("display.max_columns", 500)
16
+ pd.set_option("display.width", 1000)
17
+ from tqdm.auto import tqdm
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import tokenizers
22
+ import transformers
23
+
24
+ print(f"tokenizers.__version__: {tokenizers.__version__}")
25
+ print(f"transformers.__version__: {transformers.__version__}")
26
+ print(f"torch.__version__: {torch.__version__}")
27
+ print(f"torch cuda version: {torch.version.cuda}")
28
+ from transformers import AutoTokenizer, AutoConfig
29
+ from transformers import BitsAndBytesConfig, AutoModelForCausalLM, MistralForCausalLM
30
+ from peft import LoraConfig, get_peft_model
31
+
32
+
33
# Page title used by the Gradio UI below.
title = "H2O AI Predict the LLM"

# Allocate a tiny tensor and move it to CUDA. NOTE(review): on HF ZeroGPU
# Spaces the GPU is only attached inside @spaces.GPU-decorated calls, so at
# module-import time this device prints as 'cpu' — see the comment below.
zero = torch.Tensor([0]).cuda()
print(zero.device)  # <-- 'cpu' 🤔
37
+
38
@spaces.GPU
def greet(n):
    """Return a greeting string built from the module-level `zero` tensor.

    Decorated with @spaces.GPU so that, on a ZeroGPU Space, the CUDA device
    is attached for the duration of this call — `zero` reports 'cuda:0' here
    even though it printed 'cpu' at import time.
    """
    print(zero.device)  # <-- 'cuda:0' 🤗
    message = f"Hello {zero + n} Tensor"
    return message
42
+
43
+ gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text()).launch()
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- import spaces
3
 
4
  import os
5
  import gc
@@ -32,12 +32,40 @@ from peft import LoraConfig, get_peft_model
32
 
33
  title = "H2O AI Predict the LLM"
34
 
35
- zero = torch.Tensor([0]).cuda()
36
- print(zero.device) # <-- 'cpu' πŸ€”
 
 
 
 
 
 
37
 
38
- @spaces.GPU
39
- def greet(n):
40
- print(zero.device) # <-- 'cuda:0' πŸ€—
41
- return f"Hello {zero + n} Tensor"
42
 
43
- gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text()).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ # import spaces
3
 
4
  import os
5
  import gc
 
32
 
33
  title = "H2O AI Predict the LLM"
34
 
35
# Theme borrowed from https://huggingface.co/spaces/trl-lib/stack-llama/blob/main/app.py
# NOTE(review): defined here but not yet passed to gr.Blocks (see `# theme=theme`).
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[
        gr.themes.GoogleFont("Open Sans"),
        "ui-sans-serif",
        "system-ui",
        "sans-serif",
    ],
)
43
 
44
def do_submit(question, response):
    """Placeholder prediction handler for the Submit button.

    Joins the question and response into the text a model would score.
    The actual inference call is disabled for now, so a constant string
    is returned instead.
    """
    full_text = " ".join([question, response])
    # result = do_inference(full_text)  # inference intentionally disabled (WIP)
    return "result"
48
 
49
# Build and launch the demo UI. (theme=theme intentionally not passed yet.)
with gr.Blocks(title=title) as demo:  # theme=theme
    # Two random sample rows drive the gr.Examples widget below.
    df_examples = pd.read_csv('sample_examples.csv')
    example_list = df_examples[['Question', 'Response', 'target']].sample(2).values.tolist()

    gr.Markdown(f"## {title}")

    with gr.Row():
        question_text = gr.Textbox(lines=2, placeholder="Question:", label="")
        response_text = gr.Textbox(lines=2, placeholder="Response:", label="")
        # Hidden, read-only target column — populated only by the examples.
        target_text = gr.Textbox(
            lines=1, placeholder="Target:", label="", interactive=False, visible=False
        )
        llm_num = gr.Textbox(value="", label="LLM #")

    with gr.Row():
        sub_btn = gr.Button("Submit")
        sub_btn.click(
            fn=do_submit,
            inputs=[question_text, response_text],
            outputs=[llm_num],
        )

    gr.Markdown("## Sample Inputs:")
    gr.Examples(
        example_list,
        [question_text, response_text, target_text],
        # cache_examples=True,
    )

demo.launch(debug=True)