sandz7 committed
Commit
d483b43
1 Parent(s): 4aee1e8

added torch in requirements and removed some text from UI

Files changed (2)
  1. app.py +4 -35
  2. requirements.txt +2 -1
app.py CHANGED
@@ -17,34 +17,6 @@ DESCRIPTION = '''
 </div>
 '''
 
-LICENSE = """
-<p/>
----
-Built with Meta Llama 3
-"""
-
-PLACEHOLDER = """
-<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-   <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
-   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta llama3</h1>
-   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
-</div>
-"""
-
-
-css = """
-h1 {
-  text-align: center;
-  display: block;
-}
-#duplicate-button {
-  margin: auto;
-  color: white;
-  background: #1565c0;
-  border-radius: 100vh;
-}
-"""
-
 # Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
 model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.float16).to('cuda')
@@ -97,16 +69,15 @@ def chat_llama3_8b(message: str,
     for text in streamer:
         outputs.append(text)
         #print(outputs)
-        yield "".join(outputs)
+    return "".join(outputs)
 
 
 # Gradio block
-chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
+chatbot=gr.Chatbot(height=600, label='Loki AI')
 
-with gr.Blocks(fill_height=True, css=css) as demo:
+with gr.Blocks(fill_height=True) as demo:
 
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
     gr.ChatInterface(
         fn=chat_llama3_8b,
         chatbot=chatbot,
@@ -135,8 +106,6 @@ with gr.Blocks(fill_height=True, css=css) as demo:
         ],
         cache_examples=False,
     )
-
-    gr.Markdown(LICENSE)
-
+
 if __name__ == "__main__":
     demo.launch()
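The functional change in the second hunk is the switch from `yield` to `return` in `chat_llama3_8b`: `gr.ChatInterface` streams partial replies into the chat window when its `fn` is a generator, and waits for one complete string otherwise. Below is a minimal sketch of the two patterns; the `streamer` stand-in and both function names are illustrative (the app feeds a transformers streamer here), and since the rendered diff does not preserve indentation, the post-loop placement of the `return` is an assumption.

```python
streamer = ["Hel", "lo ", "there"]  # stand-in chunks; the app iterates a real token streamer

# Before: yielding inside the token loop makes the handler a generator,
# so gr.ChatInterface repaints the reply as each chunk arrives.
def chat_streaming(message, history):
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)  # partial reply after every chunk

# After: a plain return makes the handler single-shot; the UI shows
# nothing until the loop has drained the whole stream.
def chat_single_shot(message, history):
    outputs = []
    for text in streamer:
        outputs.append(text)
    return "".join(outputs)     # full reply, delivered once
```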
requirements.txt CHANGED
@@ -1,3 +1,4 @@
 accelerate
 transformers
-SentencePiece
+SentencePiece
+torch
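The new `torch` line makes app.py's hard dependency explicit; before this commit it presumably arrived only transitively (accelerate depends on torch). For reference, the load from the diff context above is what requires it:

```python
# torch supplies both the dtype and the device move used in app.py.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    torch_dtype=torch.float16,  # half precision: ~16 GB of weights for 8B params vs ~32 GB in float32
).to("cuda")                    # assumes a CUDA GPU in the Space runtime
```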