AdamNovotnyCom committed on
Commit
8f4f3a6
1 Parent(s): a86fdf9
Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -5,16 +5,16 @@ import torch
  import transformers
  from transformers import AutoTokenizer
 
- logging.info("APP startup")
+ logging.basicConfig(level=logging.INFO)
+
  print("APP startup")
 
  pipe_flan = transformers.pipeline("text2text-generation", model="google/flan-t5-small")
  def google_flan(input_text, request: gr.Request):
      print("New response 2")
      print(request.query_params)
-     print(request.query_params["name"])
-     print(os.environ["HF_TOKEN"][:5])
-     logging.info(os.environ["HF_TOKEN"][:5])
+     print(os.environ.get("HF_TOKEN")[:5])
+     logging.info(os.environ.get("HF_TOKEN")[:5])
      return pipe_flan(input_text)
 
  # model = "meta-llama/Llama-2-7b-chat-hf"
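
Note on the changed lines: a bare logging.info() call before any logging configuration is filtered out because the root logger defaults to WARNING, which is presumably why the commit adds logging.basicConfig(level=logging.INFO). Also, os.environ.get("HF_TOKEN") returns None when the variable is unset, so slicing the result with [:5] would raise a TypeError in that case. A minimal sketch of a guarded version (the helper name log_token_prefix is illustrative, not part of the commit):

import logging
import os

# Configure logging before the first logging.info() call so INFO messages are emitted.
logging.basicConfig(level=logging.INFO)

def log_token_prefix() -> None:
    # os.environ.get returns None if HF_TOKEN is not set; guard before slicing.
    token = os.environ.get("HF_TOKEN")
    if token:
        # Log only a short prefix so the full secret never reaches the logs.
        logging.info("HF_TOKEN prefix: %s", token[:5])
    else:
        logging.info("HF_TOKEN is not set")

log_token_prefix()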