dipankardas011 committed
Commit: 115f6cd
Parent: 41747cf

Added the first version of model


Signed-off-by: Dipankar Das <dipankardas0115@gmail.com>

Files changed (3)
  1. README.md +5 -3
  2. app.py +6 -27
  3. requirements.txt +0 -1
README.md CHANGED
@@ -1,10 +1,12 @@
 ---
-title: Second Language Model
-emoji: 🏢
+title: ksctl GPT2 bot Model
+emoji: ☸️
 colorFrom: green
 colorTo: blue
 sdk: docker
-pinned: false
+pinned: true
 ---
 
+Used GPT2
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,12 +1,10 @@
 from fastapi import FastAPI
 from fastapi.responses import RedirectResponse
-# from transformers import pipeline
-from PIL import Image
 import requests
-from transformers import AutoProcessor, Pix2StructForConditionalGeneration
 
-processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
-model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")
+from transformers import pipeline, set_seed
+generator = pipeline('text-generation', model='gpt2')
+set_seed(42)
 
 
 app = FastAPI()
@@ -16,25 +14,6 @@ async def docs_redirect():
     return RedirectResponse(url='/docs')
 
 @app.get("/generate")
-def generate(url: str):
-    """
-    https://huggingface.co/docs/transformers/main/en/model_doc/pix2struct#transformers.Pix2StructForConditionalGeneration
-    https://huggingface.co/google/pix2struct-widget-captioning-large/blob/main/README.md
-    """
-    # url = "https://www.ilankelman.org/stopsigns/australia.jpg"
-    image = Image.open(requests.get(url, stream=True).raw)
-
-    inputs = processor(images=image, return_tensors="pt")
-
-    # autoregressive generation
-    generated_ids = model.generate(**inputs, max_new_tokens=50)
-    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-    print(generated_text)
-
-    # conditional generation
-    text = "A picture of"
-    inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False)
-
-    generated_ids1 = model.generate(**inputs, max_new_tokens=50)
-    generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0]
-    return {"output": {"autoregressive": generated_text, "conditional": generated_text1},}
+def generate(text: str):
+    output: list[dict] = generator(text, max_length=200, num_return_sequences=1)
+    return {"output": output}
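The rewritten `/generate` endpoint forwards the `text` query parameter straight to the GPT-2 pipeline and returns its output as-is, which for a `text-generation` pipeline is a list of dicts keyed by `generated_text`. A minimal client sketch, assuming a hypothetical base URL (Docker Spaces default to port 7860; substitute the real Space host):

```python
import requests

# Hypothetical base URL: replace with the real Space host, or use
# http://localhost:7860 when running the Docker image locally.
BASE_URL = "http://localhost:7860"

resp = requests.get(f"{BASE_URL}/generate", params={"text": "Kubernetes clusters are"}, timeout=60)
resp.raise_for_status()

# app.py wraps the pipeline output, so the JSON looks like
# {"output": [{"generated_text": "Kubernetes clusters are ..."}]}
for candidate in resp.json()["output"]:
    print(candidate["generated_text"])
```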
requirements.txt CHANGED
@@ -4,4 +4,3 @@ uvicorn[standard]==0.17.*
 sentencepiece==0.1.*
 torch==1.11.*
 transformers==4.*
-Pillow
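Pillow drops out because the new app.py no longer opens images; the torch and transformers pins cover the GPT-2 pipeline. As a rough sanity check (not part of this commit), the pipeline call used in app.py can be exercised on its own to see the shape of what the endpoint returns; the prompt below is only an illustrative placeholder:

```python
from transformers import pipeline, set_seed

# Same setup as app.py: GPT-2 text-generation pipeline with a fixed seed
# for reproducible sampling. Model weights download on first use.
generator = pipeline("text-generation", model="gpt2")
set_seed(42)

# One dict per returned sequence; the prompt plus its continuation sit
# under the "generated_text" key.
results = generator("ksctl is a CLI tool that", max_length=60, num_return_sequences=2)
for r in results:
    print(r["generated_text"])
```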