benfunke98 committed on
Commit
93832f6
1 Parent(s): f8bd053

added token

Browse files
Files changed (1) hide show
  1. app.py +3 -19
app.py CHANGED
@@ -1,5 +1,7 @@
1
  from fastapi import FastAPI, UploadFile, File
2
  from transformers import pipeline
 
 
3
  from fastai.vision.all import *
4
  from PIL import Image
5
  import os
@@ -12,24 +14,6 @@ access_token = os.getenv("HF_TOKEN")
12
  # of the app. This way, we can use the docs as a landing page for the app on Spaces.
13
  app = FastAPI(docs_url="/")
14
 
15
- pipe = pipeline("text2text-generation", model="google/flan-t5-small")
16
- categories = ('Heart', 'Oblong', 'Oval', 'Round', 'Square')
17
- learn = load_learner('model.pkl')
18
-
19
- # Überprüfe, ob das Zugriffstoken vorhanden ist
20
- if access_token is None:
21
- raise ValueError("Access token is missing. Make sure it is set as an environment variable.")
22
-
23
- @app.get("/generate")
24
- def generate(text: str):
25
- """
26
- Using the text2text-generation pipeline from `transformers`, generate text
27
- from the given input text. The model used is `google/flan-t5-small`, which
28
- can be found [here](https://huggingface.co/google/flan-t5-small).
29
- """
30
- output = pipe(text)
31
- return {"output": output[0]["generated_text"]}
32
-
33
  @app.post("/face-analyse")
34
  async def face_analyse(file: UploadFile = File(...)):
35
  # Read the uploaded file content
@@ -62,7 +46,7 @@ async def face_analyse(file: UploadFile = File(...)):
62
 
63
  # Initialisiere das Modell und den Tokenizer
64
  model = "meta-llama/CodeLlama-7b-hf"
65
- tokenizer = AutoTokenizer.from_pretrained(model)
66
  llama_pipeline = pipeline(
67
  "text-generation",
68
  model=model,
 
1
  from fastapi import FastAPI, UploadFile, File
2
  from transformers import pipeline
3
+ from transformers import AutoTokenizer
4
+ import transformers
5
  from fastai.vision.all import *
6
  from PIL import Image
7
  import os
 
14
  # of the app. This way, we can use the docs as a landing page for the app on Spaces.
15
  app = FastAPI(docs_url="/")
16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  @app.post("/face-analyse")
18
  async def face_analyse(file: UploadFile = File(...)):
19
  # Read the uploaded file content
 
46
 
47
  # Initialisiere das Modell und den Tokenizer
48
  model = "meta-llama/CodeLlama-7b-hf"
49
+ tokenizer = AutoTokenizer.from_pretrained(model, token=access_token)
50
  llama_pipeline = pipeline(
51
  "text-generation",
52
  model=model,