# # Mount Google Drive
# from google.colab import drive

# drive.mount('/content/drive')
import gradio as gr

from transformers import AutoTokenizer, AutoModelForCausalLM, TFAutoModel

# Load the tokenizer and model from the Hugging Face model ID
model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
huggingface_model = AutoModelForCausalLM.from_pretrained(model_name)

# Specify the correct repository ID, with no extra path information
repo_name = "motofanacc/monModel"

# Revision of the repository that holds the model weights
model_checkpoint = "main"

# Load the model from the Hugging Face Hub (converting PyTorch weights to TensorFlow)
model = TFAutoModel.from_pretrained(repo_name, from_pt=True, revision=model_checkpoint)

# # Load the TensorFlow weights
# tensorflow_weights_path = "motofanacc/GradioChatBot/tree/main/Checkpoints"
# tensorflow_model = TFAutoModel.from_pretrained(tensorflow_weights_path)

def generate_text(model, input_text, max_length=50):
    inputs = tokenizer(input_text, return_tensors="pt")
    output_ids = model.generate(**inputs, max_length=max_length)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
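
# Hypothetical usage sketch (assumes the gpt2 tokenizer and huggingface_model loaded above):
#   answer = generate_text(huggingface_model, "What is thyroid cancer?")
# The decoded string contains the prompt followed by up to max_length generated tokens.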

# preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
#     "gpt2_base_en",
#     sequence_length=128,
# )
# gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset(
#     "gpt2_base_en",
#     preprocessor=preprocessor,
# )

# gpt2_lm.load_weights('/content/drive/MyDrive/Checkpoints/weights')

# Gradio app
# <a href="https://www.freepik.com/icon/user_456212#fromView=search&term=avatar&track=ais&page=1&position=22&uuid=48125587-eeb5-4fe3-9eb2-f9fe7330f4fe">Icon by Freepik</a>
# <a href="https://www.freepik.com/icon/ai_2814666#fromView=search&term=robot&track=ais&page=1&position=20&uuid=58780fb9-dab6-4fb1-9928-479b2926a242">Icon by Freepik</a>

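# Soft Gradio theme plus custom CSS: white page background, a themed image behind
# the chatbot, custom scrollbars for the chat area and dark message bubbles.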
theme = gr.themes.Soft().set(
    background_fill_primary='white',
    background_fill_primary_dark='white',
)
with gr.Blocks(theme=theme,css="""
    .gradio-container {
        background-color: white;
        width: 70vw;
    }
    #chatbot{
        background-image: url("https://png.pngtree.com/thumb_back/fh260/background/20201014/pngtree-breast-cancer-awareness-pink-ribbons-background-design-image_417234.jpg");
    }
    #chatbot .bubble-wrap::-webkit-scrollbar {
        width: 20px;
    }

    #chatbot .bubble-wrap::-webkit-scrollbar-thumb {
        background-color: whitesmoke;
        border-radius: 20px;
        border: 6px solid transparent;
        background-clip: content-box;
    }

    #chatbot .bubble-wrap::-webkit-scrollbar-thumb:hover {
        background-color: grey;
    }

    #chatbot .bubble-wrap::-webkit-scrollbar-track {
        background-color: transparent;
    }
    #chatbot .message p{
        text-align: start;
        color: white;
    }
    h1, p {
        text-align: center;
        color: black;
    }
    body #footer_note {
        text-align: center;
        font-size: x-small;
        font-weight:bold;
    }
    .label {
        display:none;
    }
    textarea, .gallery-item, .gallery-item:hover {
      color: black;
      border: 1px black solid;
      background-color: white;
    }
    .user {
      background-color: #374151;
    }
    .bot {
      background-color: #111827;
    }
    .gallery-item:hover {
      color: white;
      border: 1px black solid;
      background-color: black;
    }
    body gradio-app  {
      background-color: white;
    }
    """) as demo:
    gr.HTML(f"""
        <html>
        <body>
            <h1>Welcome, I'm CancerBot 🤖</h1>
            <p>Here you can ask all questions about cancer</p>
        </body>
        </html>
    """)

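    # Chat callback: validate the message length, generate an answer with the
    # GPT-2 model and append the (user, bot) pair to the chat history.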
    def return_message(message, history, model=huggingface_model, max_length=128):
        if len(message) <= 1:
            gr.Warning('Please enter a message with more than one character.')
        elif len(message) > max_length:
            gr.Warning(f"Input should not exceed {max_length} characters.")
        else:
            cancer_answer = generate_text(model, message)
            message = "**You**\n" + message
            history.append([message, f"**CancerBot**\n{cancer_answer}"])
        return "", history

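    # Chatbot display: bubble layout with avatars (read from Google Drive, so the
    # Drive mount at the top must be enabled), a copy button and like buttons.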
    chatbot = gr.Chatbot(
        height="60vh",
        bubble_full_width=True,
        avatar_images=(["/content/drive/MyDrive/Data/avatar.png", "/content/drive/MyDrive/Data/robot.png"]),
        show_copy_button=True,
        likeable=True,
        layout='bubble',
        elem_id='chatbot',
        show_label=False,
    )
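    # Input row: textbox plus a submit button; the button click and pressing Enter
    # in the textbox both run the same chat callback.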
    with gr.Row():
        input_box = gr.Textbox(placeholder="Message CancerBot...", container=False, scale=9)
        submit_btn = gr.Button(value="⬆", scale=1)
        submit_btn.click(return_message, [input_box, chatbot],[input_box, chatbot])
    examples = gr.Examples(examples=["What is thyroid cancer?", "How can I know if I have lung cancer?",
                                     "How many types of cancer are there?"], inputs=[input_box], label="")
    input_box.submit(return_message, [input_box, chatbot],[input_box, chatbot])
    gr.HTML(f"""
            <html>
            <body>
            <p id="footer_note">CancerBot is based on cancer documents. Consider checking important information.</p>
            </body>
            </html>
            """)
demo.queue(default_concurrency_limit=34) # 32 students, 2 teachers
demo.launch(share=True)