AmadouDiaV committed
Commit
8a64278
•
1 Parent(s): 50e1270
Files changed (4)
  1. app copy.py +157 -0
  2. app.py +27 -32
  3. requirements.txt +0 -4
  4. weight.h5 +0 -3
app copy.py ADDED
@@ -0,0 +1,157 @@
+
+ # # Mount Google Drive
+ # from google.colab import drive
+
+ # drive.mount('/content/drive')
+ import gradio as gr
+
+ from transformers import AutoModel, TFAutoModel
+
+ # Hugging Face model built from the model identifier
+ model_name = "gpt2"
+ huggingface_model = AutoModel.from_pretrained(model_name)
+
+ # Specify the repository identifier on its own, without any extra path segments
+ repo_name = "motofanacc/monModel"
+
+ # Branch of the repository that holds the model weights
+ model_checkpoint = "main"
+
+ # Load the model from the Hugging Face Hub
+ model = TFAutoModel.from_pretrained(repo_name, from_pt=True, revision=model_checkpoint)
+
+ # # Load the TensorFlow weights
+ # tensorflow_weights_path = "motofanacc/GradioChatBot/tree/main/Checkpoints"
+ # tensorflow_model = TFAutoModel.from_pretrained(tensorflow_weights_path)
+
+ def generate_text(model, input_text, max_length=50):
+     return model.generate(input_text, max_length=max_length)
+
+ # preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
+ #     "gpt2_base_en",
+ #     sequence_length=128,
+ # )
+ # gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset(
+ #     "gpt2_base_en",
+ #     preprocessor=preprocessor,
+ # )
+
+ # gpt2_lm.load_weights('/content/drive/MyDrive/Checkpoints/weights')
+
+ # Gradio app
+ # <a href="https://www.freepik.com/icon/user_456212#fromView=search&term=avatar&track=ais&page=1&position=22&uuid=48125587-eeb5-4fe3-9eb2-f9fe7330f4fe">Icon by Freepik</a>
+ # <a href="https://www.freepik.com/icon/ai_2814666#fromView=search&term=robot&track=ais&page=1&position=20&uuid=58780fb9-dab6-4fb1-9928-479b2926a242">Icon by Freepik</a>
+
+ theme = gr.themes.Soft().set(
+     background_fill_primary='white',
+     background_fill_primary_dark='white',
+ )
+ with gr.Blocks(theme=theme, css="""
+ .gradio-container {
+     background-color: white;
+     width: 70vw;
+ }
+ #chatbot {
+     background-image: url("https://png.pngtree.com/thumb_back/fh260/background/20201014/pngtree-breast-cancer-awareness-pink-ribbons-background-design-image_417234.jpg");
+ }
+ #chatbot .bubble-wrap::-webkit-scrollbar {
+     width: 20px;
+ }
+ #chatbot .bubble-wrap::-webkit-scrollbar-thumb {
+     background-color: whitesmoke;
+     border-radius: 20px;
+     border: 6px solid transparent;
+     background-clip: content-box;
+ }
+ #chatbot .bubble-wrap::-webkit-scrollbar-thumb:hover {
+     background-color: grey;
+ }
+ #chatbot .bubble-wrap::-webkit-scrollbar-track {
+     background-color: transparent;
+ }
+ #chatbot .message p {
+     text-align: start;
+     color: white;
+ }
+ h1, p {
+     text-align: center;
+     color: black;
+ }
+ body #footer_note {
+     text-align: center;
+     font-size: x-small;
+     font-weight: bold;
+ }
+ .label {
+     display: none;
+ }
+ textarea, .gallery-item, .gallery-item:hover {
+     color: black;
+     border: 1px black solid;
+     background-color: white;
+ }
+ .user {
+     background-color: #374151;
+ }
+ .user {
+     background-color: #111827;
+ }
+ .gallery-item:hover {
+     color: white;
+     border: 1px black solid;
+     background-color: black;
+ }
+ body gradio-app {
+     background-color: white;
+ }
+ """) as demo:
+     gr.HTML("""
+     <html>
+     <body>
+         <h1>Welcome, I'm CancerBot 🤖</h1>
+         <p>Here you can ask all questions about cancer</p>
+     </body>
+     </html>
+     """)
+
+     def return_message(message, history, model=huggingface_model, max_length=128):
+         if len(message) <= 1:
+             gr.Warning('Please enter a message with more than one character.')
+         elif len(message) > max_length:
+             gr.Warning(f"Input should not exceed {max_length} characters.")
+         else:
+             cancer_answer = generate_text(model, message)
+             message = "**You**\n" + message
+             history.append([message, f"**CancerBot**\n{cancer_answer}"])
+         return "", history
+
+     chatbot = gr.Chatbot(
+         height="60vh",
+         bubble_full_width=True,
+         avatar_images=["/content/drive/MyDrive/Data/avatar.png", "/content/drive/MyDrive/Data/robot.png"],
+         show_copy_button=True,
+         likeable=True,
+         layout='bubble',
+         elem_id='chatbot',
+         show_label=False,
+     )
+     with gr.Row():
+         input_box = gr.Textbox(placeholder="Message CancerBot...", container=False, scale=9)
+         submit_btn = gr.Button(value="⬆", scale=1)
+     submit_btn.click(return_message, [input_box, chatbot], [input_box, chatbot])
+     examples = gr.Examples(examples=["What is a thyroid cancer ?", "How can I know that I have a lung cancer ?",
+                                      "How many types of cancer ?"], inputs=[input_box], label="")
+     input_box.submit(return_message, [input_box, chatbot], [input_box, chatbot])
+     gr.HTML("""
+     <html>
+     <body>
+         <p id="footer_note">CancerBot is based on cancer documents. Consider checking important information.</p>
+     </body>
+     </html>
+     """)
+ demo.queue(default_concurrency_limit=34)  # 32 students, 2 teachers
+ demo.launch(share=True)
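
Note: the backed-up script above passes a raw string straight to model.generate() on a plain AutoModel, which the transformers API does not support without a tokenizer. The following is a minimal sketch of the tokenizer-based flow it appears to be aiming for; the public "gpt2" checkpoint is used here purely for illustration and is an assumption, the commit's own repo ("motofanacc/monModel") would take its place if it hosts a causal-LM head.

from transformers import AutoTokenizer, TFAutoModelForCausalLM

# Assumption: base "gpt2" stands in for the fine-tuned weights.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
lm = TFAutoModelForCausalLM.from_pretrained("gpt2")

def generate_text(model, input_text, max_length=50):
    # Tokenize the prompt, generate token ids, then decode back to text.
    inputs = tokenizer(input_text, return_tensors="tf")
    output_ids = model.generate(**inputs, max_length=max_length)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

print(generate_text(lm, "What is thyroid cancer?"))
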
app.py CHANGED
@@ -1,43 +1,38 @@
-
- # # Mount Google Drive
- # from google.colab import drive
-
- # drive.mount('/content/drive')
- import gradio as gr
-
- from transformers import AutoModel, TFAutoModel
-
- # Hugging Face model built from the model identifier
- model_name = "gpt2"
- huggingface_model = AutoModel.from_pretrained(model_name)
-
- # Specify the repository identifier on its own, without any extra path segments
- repo_name = "motofanacc/monModel"
-
- # Branch of the repository that holds the model weights
- model_checkpoint = "main"
-
- # Load the model from the Hugging Face Hub
- model = TFAutoModel.from_pretrained(repo_name, from_pt=True, revision=model_checkpoint)
-
- # # Load the TensorFlow weights
- # tensorflow_weights_path = "motofanacc/GradioChatBot/tree/main/Checkpoints"
- # tensorflow_model = TFAutoModel.from_pretrained(tensorflow_weights_path)
+ # -*- coding: utf-8 -*-
+ """GradioApp - Final.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/13x0cApCbqR5GKWE2nrk7DV4rcgJcO_HF
+ """
+ import subprocess
+ import sys
+
+ def install(package):
+     subprocess.check_call([sys.executable, "-m", "pip", "install", package])
+
+ install("typing-extensions")
+ install("gradio")
+ install("keras_nlp")
+
+ from tensorflow import keras
+ import keras_nlp
+ import gradio as gr
 
  def generate_text(model, input_text, max_length=50):
      return model.generate(input_text, max_length=max_length)
 
- # preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
- #     "gpt2_base_en",
- #     sequence_length=128,
- # )
- # gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset(
- #     "gpt2_base_en",
- #     preprocessor=preprocessor,
- # )
-
- # gpt2_lm.load_weights('/content/drive/MyDrive/Checkpoints/weights')
+ preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
+     "gpt2_medium_en",
+     sequence_length=128,
+ )
+ gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset(
+     "gpt2_medium_en",
+     preprocessor=preprocessor,
+ )
+
+ gpt2_lm.load_weights('./Checkpoints')
 
  # Gradio app
  # <a href="https://www.freepik.com/icon/user_456212#fromView=search&term=avatar&track=ais&page=1&position=22&uuid=48125587-eeb5-4fe3-9eb2-f9fe7330f4fe">Icon by Freepik</a>
@@ -118,7 +113,7 @@ with gr.Blocks(theme=theme, css="""
      </html>
      """)
 
-     def return_message(message, history, model=huggingface_model, max_length=128):
+     def return_message(message, history, model=gpt2_lm, max_length=128):
          if len(message) <= 1:
              gr.Warning('Please enter a message with more than one character.')
          elif len(message) > max_length:
@@ -154,4 +149,4 @@ with gr.Blocks(theme=theme, css="""
      </html>
      """)
  demo.queue(default_concurrency_limit=34)  # 32 students, 2 teachers
- demo.launch(share=True)
+ demo.launch(share=True, favicon_path="/content/drive/MyDrive/Data/robot.png")
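
For reference, the keras_nlp path that the new app.py relies on can be exercised on its own before wiring it into Gradio. This is a minimal sketch using the same preset and sequence length as the diff above; it assumes the fine-tuned checkpoint prefix ./Checkpoints is available locally.

import keras_nlp

# Same preset and sequence length as app.py above.
preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
    "gpt2_medium_en", sequence_length=128
)
gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset(
    "gpt2_medium_en", preprocessor=preprocessor
)
gpt2_lm.load_weights("./Checkpoints")  # fine-tuned weights (assumed present)

# Generate directly from a raw prompt; keras_nlp handles tokenization internally.
print(gpt2_lm.generate("What is thyroid cancer?", max_length=128))
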
requirements.txt DELETED
@@ -1,4 +0,0 @@
- transformers
- tensorflow
- torch
- tf-keras
weight.h5 DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:318750929cbd308cef9661edb78764506fece63ddd58286cc8a7d561f680012b
- size 245506128