yutaogawa commited on
Commit
2147fd0
1 Parent(s): d209cfd

Add a fill-mask pipeline using the jmedroberta-base-manbyo-wordpiece model.

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. MANBYO_201907_Dic-utf8.dic +3 -0
  3. app.py +40 -10
  4. requirements.txt +2 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ MANBYO_201907_Dic-utf8.dic filter=lfs diff=lfs merge=lfs -text
MANBYO_201907_Dic-utf8.dic ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2476d813f192bc069fe499c70e03351009c13ed408f786d8f360b3e59e826e41
3
+ size 71728213
app.py CHANGED
@@ -1,21 +1,51 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
-
4
- pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
5
 
 
 
6
  # def greet(name):
7
  # return f"Hello {name}!"
8
 
9
  # demo = gr.Interface(fn=greet, inputs="text", outputs="text")
10
 
11
- def predict(image):
12
- predictions = pipeline(image)
13
- return {p["label"]: p["score"] for p in predictions}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  demo = gr.Interface(
16
- predict,
17
- inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
18
- outputs=gr.outputs.Label(num_top_classes=2),
19
- title="Hot Dog? Or Not?"
 
20
  )
21
  demo.launch()
 
import gradio as gr
from transformers import pipeline, AutoTokenizer


##############
# <Greeting>
# def greet(name):
#     return f"Hello {name}!"

# demo = gr.Interface(fn=greet, inputs="text", outputs="text")


##############
# <Hotdog Not Hotdog>
# pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

# def predict(image):
#     predictions = pipeline(image)
#     return {p["label"]: p["score"] for p in predictions}

# demo = gr.Interface(
#     predict,
#     inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
#     outputs=gr.outputs.Label(num_top_classes=2),
#     title="Hot Dog? Or Not?"
# )

# Tokenizer for the medical-domain model; `mecab_kwargs` forwards the Manbyo
# disease-name user dictionary to MeCab so medical terms segment correctly.
# (Passed as a plain keyword argument instead of the original `**{...}` unpack.)
tokenizer = AutoTokenizer.from_pretrained(
    "alabnii/jmedroberta-base-manbyo-wordpiece",
    mecab_kwargs={"mecab_option": "-u MANBYO_201907_Dic-utf8.dic"},
)

# Fill-mask pipeline returning the 20 highest-scoring candidates per [MASK].
# Renamed from `pipeline` — the original assignment shadowed the imported
# `transformers.pipeline` factory, which would break any later factory call.
fill_mask = pipeline(
    "fill-mask",
    model="alabnii/jmedroberta-base-manbyo-wordpiece",
    tokenizer=tokenizer,
    top_k=20,
)


def fill(text):
    """Run fill-mask on `text`; map each candidate token string to its score."""
    return {x["token_str"]: x["score"] for x in fill_mask(text)}


demo = gr.Interface(
    fill,
    inputs="text",
    outputs=gr.Label(label="Output"),
    title="fill-mask",
    examples=[['この患者は[MASK]と診断された。']],
)
demo.launch()
requirements.txt CHANGED
@@ -18,6 +18,7 @@ filelock==3.10.7
18
  fonttools==4.39.3
19
  frozenlist==1.3.3
20
  fsspec==2023.3.0
 
21
  gradio==3.24.0
22
  gradio_client==0.0.5
23
  h11==0.14.0
@@ -26,6 +27,7 @@ httpx==0.23.3
26
  huggingface-hub==0.13.3
27
  idna==3.4
28
  importlib-resources==5.12.0
 
29
  Jinja2==3.1.2
30
  jsonschema==4.17.3
31
  kiwisolver==1.4.4
 
18
  fonttools==4.39.3
19
  frozenlist==1.3.3
20
  fsspec==2023.3.0
21
+ fugashi==1.2.1
22
  gradio==3.24.0
23
  gradio_client==0.0.5
24
  h11==0.14.0
 
27
  huggingface-hub==0.13.3
28
  idna==3.4
29
  importlib-resources==5.12.0
30
+ ipadic==1.0.0
31
  Jinja2==3.1.2
32
  jsonschema==4.17.3
33
  kiwisolver==1.4.4