nicolamunozi committed on
Commit
ab62420
1 Parent(s): 989a69c
Files changed (1) hide show
  1. app.py +10 -14
app.py CHANGED
@@ -1,23 +1,19 @@
1
- pip install transformers
2
-
3
- from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
4
- import torch
5
  import streamlit as st
6
 
7
- #Setting.
8
- torch.set_default_tensor_type(torch.cuda.FloatTensor)
9
-
10
-
11
- #Loading model.
12
-
13
- model = AutoModelForCausalLM.from_pretrained("bigscience/bloom", use_cache=True)
14
- tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom")
15
 
 
 
 
 
 
 
 
16
 
17
 
18
  #For app.
19
  prompt = st.text_input("Input")
20
- input_ids = tokenizer(prompt, return_tensors="pt").to(0)
21
- sample = model.generate(**input_ids, max_length=100, top_k=0, temperature=0.7)
22
  output = st.write(tokenizer.decode(sample[0]))
23
 
 
1
import os

import requests
import streamlit as st

# Hosted Inference API endpoint for the BLOOM model.
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"

# Read the Hugging Face API token from the environment. The original code
# referenced an undefined name `API_TOKEN`, so building `headers` raised
# NameError the moment the module was imported.
API_TOKEN = os.environ.get("HF_API_TOKEN", "")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
 
 
 
 
 
6
 
7
def query(payload):
    """POST *payload* to the BLOOM Inference API and return the parsed JSON.

    Parameters
    ----------
    payload : dict
        Request body following the Inference API schema,
        e.g. ``{"inputs": "<prompt text>"}``.

    Returns
    -------
    The JSON-decoded response body (a list of generations on success,
    or an error dict from the API).

    Relies on the module-level ``API_URL`` and ``headers``.
    """
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

# NOTE(review): the original module also issued a demo request here with a
# hard-coded prompt ("Can you please let us know more details about your ")
# whose result was immediately overwritten by the app code below — one wasted
# network round-trip on every Streamlit rerun. Removed as dead code.
14
 
15
 
16
# For app: read a prompt from the user and display the model's completion.
# The original line called `tokenizer.decode(sample[0])`, but `tokenizer`
# and `sample` were both removed by the switch to the HTTP Inference API,
# so any input raised NameError. Route the prompt through query() instead.
prompt = st.text_input("Input")
if prompt:  # skip the API call while the input box is still empty
    result = query({"inputs": prompt})
    # The Inference API normally returns [{"generated_text": ...}] — TODO
    # confirm; fall back to showing the raw payload (e.g. an error dict).
    if isinstance(result, list) and result and "generated_text" in result[0]:
        output = st.write(result[0]["generated_text"])
    else:
        output = st.write(result)
19