harikach24 commited on
Commit
562a075
1 Parent(s): 430115e

ML app added, SDK changed to gradio

Browse files
Files changed (4)
  1. Dockerfile +0 -13
  2. README.md +2 -2
  3. app.py +45 -8
  4. requirements.txt +3 -1
Dockerfile DELETED
@@ -1,13 +0,0 @@
1
- FROM python:3.10
2
-
3
- ADD requirements.txt .
4
-
5
- RUN pip install -r requirements.txt
6
-
7
- RUN rm requirements.txt
8
-
9
- ADD app.py .
10
-
11
- EXPOSE 8001
12
-
13
- CMD python app.py
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md CHANGED
@@ -3,11 +3,11 @@ title: SummaryDemo
3
  emoji: 🔥
4
  colorFrom: yellow
5
  colorTo: yellow
6
- sdk: docker
7
  sdk_version: 4.24.0
8
  app_file: app.py
9
  pinned: false
10
- app_port: 8001
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
3
  emoji: 🔥
4
  colorFrom: yellow
5
  colorTo: yellow
6
+ sdk: gradio
7
  sdk_version: 4.24.0
8
  app_file: app.py
9
  pinned: false
10
+ #app_port: 8001
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,13 +1,50 @@
 
 
 
1
  import gradio as gr
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
 
6
- iface = gr.Interface(fn=greet,
7
- inputs="text",
8
- outputs="text",
9
- title="Simple Application",
10
- description="via docker",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  allow_flagging="never",
12
  )
13
- iface.launch(server_name = "0.0.0.0", server_port = 8001)
 
1
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from peft import PeftModel
import gradio as gr

# Hub repos holding the fine-tuned summarizer and its PEFT adapter.
username = "harikach24"
base_checkpoint = username + '/dialogue_Summary'
peft_model_id = username + '/dialogue_Summary_peft'

# Tokenizer and base seq2seq model come from the same checkpoint,
# then the LoRA/PEFT adapter is layered on top for inference only.
tokenizer = AutoTokenizer.from_pretrained(base_checkpoint)
peft_model_base = AutoModelForSeq2SeqLM.from_pretrained(base_checkpoint)
loaded_peft_model = PeftModel.from_pretrained(
    model=peft_model_base, model_id=peft_model_id, is_trainable=False,)
22
def generate_summary(dialogue):
    """Summarize a dialogue with the PEFT-adapted seq2seq model.

    Prepare prompt -> tokenize -> generate output using LLM -> detokenize
    output.

    Args:
        dialogue: Raw conversation text to summarize.

    Returns:
        The decoded summary string (special tokens stripped).
    """
    # FIX: the original placed the description string after the first
    # statement, so it was a dead bare-string expression, not a docstring;
    # it is now a real docstring. The `llm = loaded_peft_model` alias was
    # redundant and has been removed.
    input_prompt = f"""
    Summarize the following conversation.
    {dialogue}
    Summary:
    """

    inputs = tokenizer(input_prompt, return_tensors='pt')

    # Passing attention_mask alongside input_ids silences the HF warning;
    # for a single unpadded sequence the mask is all ones, so generation
    # output is unchanged.
    tokenized_output = loaded_peft_model.generate(
        input_ids=inputs['input_ids'],
        attention_mask=inputs['attention_mask'],
        min_length=30,
        max_length=200,
    )

    summary = tokenizer.decode(tokenized_output[0], skip_special_tokens=True)

    return summary
41
+
42
# Build and serve the Gradio demo: one text box in, one text box out.
iface = gr.Interface(
    fn=generate_summary,
    inputs="textbox",
    outputs="textbox",
    title="Dialogue Summarization",
    description="via gradio",
    allow_flagging="never",
)
iface.launch()
requirements.txt CHANGED
@@ -1,4 +1,6 @@
1
  # Dependencies for application to be added to this file
2
 
3
  numpy
4
- gradio
 
 
 
1
  # Dependencies for application to be added to this file
2
 
3
  numpy
4
+ gradio
5
+ transformers[torch]
6
+ peft