from goose3 import Goose
from resources import *
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline
import gradio as gr
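# Note: `from resources import *` provides the descriptions, hero images and example
# inputs used below (bellamy_bowie_description, urly_murly_examples, ellis_cappy_hero, ...).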


# Bellamy Bowie: zero-shot classification of a message against a set of target personas
bellamy_bowie_classifier_candidate_labels = ["manager", "engineer", "technician", "politician", "scientist", "student", "journalist", "marketeer", "spokesperson", "other"]
bellamy_bowie_classifier_candidate_labels_preselection = ["manager", "engineer", "technician", "politician", "scientist", "student", "journalist"]
bellamy_bowie_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
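# For a single input text, the zero-shot pipeline returns a dict of the form
# {'sequence': ..., 'labels': [...], 'scores': [...]}, with labels sorted by descending score.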


def bellamy_bowie_predict(candidate_labels_selected, sequence):
    """Classify the text against the selected persona labels and return {label: score} for gr.Label."""
    outputs = bellamy_bowie_classifier(sequence, candidate_labels_selected)
    return dict(zip(outputs['labels'], outputs['scores']))


# Urly & Murly Simmy: semantic similarity between two webpages
# configuration: sentence embedding model and a module-level scraper reused across requests
urly_murly_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
urly_murly_scraper = Goose()
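# all-MiniLM-L6-v2 maps each text to a 384-dimensional embedding; cosine similarity lies in [-1, 1]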


def urly_murly_predict(url_1, url_2):
    # user input and text acquisition (the module-level scraper is reused across requests)
    sequence_one = urly_murly_scraper.extract(url=url_1)
    sequence_two = urly_murly_scraper.extract(url=url_2)

    # compute an embedding for each text and their cosine similarity
    sentences = [sequence_one.cleaned_text, sequence_two.cleaned_text]
    embedding_1 = urly_murly_model.encode(sentences[0], convert_to_tensor=True)
    embedding_2 = urly_murly_model.encode(sentences[1], convert_to_tensor=True)
    output = float(util.pytorch_cos_sim(embedding_1, embedding_2))

    # format the result as a score in the range [-100, 100]
    output_text = "The two webpages have a similarity score of {:.2f}.".format(output * 100)

    return output_text, sequence_one.title, sequence_one.cleaned_text, sequence_two.title, sequence_two.cleaned_text


# Ellis Cappy: image captioning with BLIP
ellis_cappy_captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base", max_new_tokens=40)
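# The image-to-text pipeline returns a list of dicts, e.g. [{'generated_text': 'a photo of ...'}]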


def ellis_cappy_captionizer(img):
    # generate captions for the image and return the text of the first generated caption
    captions = ellis_cappy_captioner(img)
    return captions[0]["generated_text"]


with gr.Blocks() as aidademo:
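    # Three tabs, one per assistant: Bellamy Bowie (persona classification),
    # Urly & Murly Simmy (webpage similarity) and Ellis Cappy (image captioning)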
    # gr.Markdown("Start typing below and then click **Run** to see the output.")

    with gr.Tab("Bellamy Bowie"):
        with gr.Row():
            with gr.Column(scale=2):
                gr.HTML(bellamy_bowie_description)
            with gr.Column(scale=1):
                gr.Image(bellamy_bowie_hero, show_label=False)
        with gr.Row():
            with gr.Column(scale=1):
                bellamy_bowie_checkbox_input = gr.CheckboxGroup(choices=bellamy_bowie_classifier_candidate_labels, value=bellamy_bowie_classifier_candidate_labels_preselection, label="Target personas of your message", info="Recommendation: Don't change the preselection for your first analysis.")
                bellamy_bowie_textbox_input = gr.Textbox(lines=10, placeholder="Your text goes here", label="Write or paste your message to classify")
                with gr.Row():
                    gr.ClearButton(components=bellamy_bowie_textbox_input, value="Clear")
                    bellamy_bowie_submit_button = gr.Button("Submit", variant="primary")
            with gr.Column(scale=1):
                bellamy_bowie_outputs = gr.Label(label="Matching scores by personas")
        with gr.Row():
            with gr.Column(scale=1):
                gr.Examples(bellamy_bowie_examples, inputs=[bellamy_bowie_textbox_input])
                gr.HTML(bellamy_bowie_note_quality)
                gr.HTML(bellamy_bowie_article)
        bellamy_bowie_submit_button.click(fn=bellamy_bowie_predict, inputs=[bellamy_bowie_checkbox_input, bellamy_bowie_textbox_input], outputs=bellamy_bowie_outputs)

    with gr.Tab("Urly & Murly Simmy"):
        with gr.Row():
            with gr.Column(scale=1):
                gr.HTML(urly_murly_description)
            with gr.Column(scale=1):
                gr.Image(urly_murly_hero, show_label=False)
        with gr.Row():
            with gr.Column(scale=1):
                urly_murly_textbox_input_1 = gr.Textbox(label="URL of first webpage")
                urly_murly_textbox_input_2 = gr.Textbox(label="URL of second webpage")
                with gr.Row():
                    urly_murly_clear_button = gr.ClearButton(components=[urly_murly_textbox_input_1, urly_murly_textbox_input_2], value="Clear")
                    urly_murly_submit_button = gr.Button("Compare", variant="primary")
                gr.HTML(urly_murly_interpretation)
            with gr.Column(scale=1):
                urly_murly_textbox_output_1 = gr.Textbox(label="Similarity score (value range -100 to 100)")
                urly_murly_textbox_output_2 = gr.Textbox(label="First webpage: extracted title")
                urly_murly_textbox_output_3 = gr.Textbox(label="First webpage: extracted text")
                urly_murly_textbox_output_4 = gr.Textbox(label="Second webpage: extracted title")
                urly_murly_textbox_output_5 = gr.Textbox(label="Second webpage: extracted text")
        with gr.Row():
            with gr.Column(scale=1):
                gr.Examples(urly_murly_examples, inputs=[urly_murly_textbox_input_1, urly_murly_textbox_input_2])
                gr.HTML(urly_murly_about_scraping)
                gr.HTML(urly_murly_article)

        urly_murly_submit_button.click(fn=urly_murly_predict,
                                       inputs=[urly_murly_textbox_input_1, urly_murly_textbox_input_2],
                                       outputs=[urly_murly_textbox_output_1, urly_murly_textbox_output_2,
                                                urly_murly_textbox_output_3, urly_murly_textbox_output_4,
                                                urly_murly_textbox_output_5])

    with gr.Tab("Ellis Cappy"):
        with gr.Row():
            with gr.Column(scale=2):
                gr.HTML(ellis_cappy_description)
            with gr.Column(scale=1):
                gr.Image(ellis_cappy_hero, show_label=False)
        with gr.Row():
            with gr.Column(scale=1):
                ellis_cappy_image_input = gr.Image(type="pil", label=None)
                with gr.Row():
                    gr.ClearButton(components=ellis_cappy_image_input, value="Clear")
                    ellis_cappy_submit_button = gr.Button("Captionize", variant="primary")
            with gr.Column(scale=1):
                ellis_cappy_textbox_output = gr.Textbox(label="Suggested caption", lines=2)
                gr.HTML(ellis_cappy_note_quality)
        with gr.Row():
            with gr.Column(scale=1):
                gr.Examples(ellis_cappy_examples, inputs=[ellis_cappy_image_input])
                gr.HTML(ellis_cappy_article)

        ellis_cappy_submit_button.click(fn=ellis_cappy_captionizer, inputs=ellis_cappy_image_input,
                                        outputs=ellis_cappy_textbox_output, api_name="captionizer")

aidademo.launch()