File size: 7,041 Bytes
beb9ce6
 
 
 
 
 
 
7415289
beb9ce6
d051e63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
beb9ce6
 
 
 
 
12c6795
beb9ce6
 
 
7415289
0f87b5f
beb9ce6
 
d051e63
 
 
beb9ce6
d051e63
beb9ce6
 
 
 
 
 
 
 
 
 
 
 
 
 
12c6795
beb9ce6
12c6795
beb9ce6
 
1b7769d
 
beb9ce6
 
 
2b9ffad
 
 
 
 
 
 
 
28634ff
 
 
7415289
d051e63
7415289
 
beb9ce6
 
 
 
 
 
 
 
 
 
 
 
 
0f87b5f
12c6795
beb9ce6
 
 
 
1b7769d
beb9ce6
5fcfd77
beb9ce6
 
 
96ca0ee
beb9ce6
 
 
 
 
ef71177
 
beb9ce6
 
 
 
 
 
 
 
 
 
 
12c6795
beb9ce6
12c6795
1b7769d
beb9ce6
12c6795
 
 
 
beb9ce6
 
 
 
 
 
 
7415289
beb9ce6
 
 
d051e63
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
import gradio as gr
import cohere
import os
import re
import uuid

# API key is read from the environment; os.getenv returns None when unset,
# in which case the Cohere client will fail at request time, not here.
# NOTE(review): `re` appears unused in this file — confirm before removing.
cohere_api_key = os.getenv("COHERE_API_KEY")
co = cohere.Client(cohere_api_key, client_name="huggingface-rp")

# System prompt prepended to every user message in generate_response();
# defines the assistant's persona, tone, and content-creation capabilities.
CUSTOM_INSTRUCTIONS = """
You are D-LOGIC, a helpful AI assistant created by Rafał Dembski. Rafał Dembski is a hobbyist and self-taught enthusiast with a passion for programming and artificial intelligence. Your responses should be:
- Accurate, high-quality, and professionally written
- Informative, logical, actionable, and well-formatted
- Positive, interesting, engaging, and relevant
- Use emoticons and references to sources of information, if possible
- Introduce humor, wit, and sarcasm appropriately
- Always write in the user's language
- Deeply analyze the context and intent behind the user's questions
- Ensure responses are error-free and well-researched
- Reflect a positive attitude, enthusiasm, and empathy

You are also a master of content creation. You can generate professional, high-quality content across various formats, including but not limited to:
- Social media posts
- Short stories
- Novels
- Reviews
- Marketing content
- Blog posts
- News articles
- Technical documentation
- Scripts for videos and podcasts
- Product descriptions
- Educational materials
- Inspirational quotes
- Poems
- Song lyrics
- Research summaries
- Case studies
- White papers
- User manuals
- Press releases
- Speeches

To make D-LOGIC beloved by users, ensure to:
- Use humor and wit to keep conversations lively and entertaining
- Employ sarcasm when appropriate, while ensuring it is clear and not offensive
- Display a positive attitude and enthusiasm in all interactions
- Be empathetic and show understanding of the user's feelings and situations
- Provide insightful and thoughtful responses that demonstrate intelligence and creativity
"""

def trigger_example(example):
    """Run a canned example prompt through the chat pipeline.

    BUG FIX: the original called ``generate_response(example)`` — missing the
    required ``cid`` argument (TypeError) — and tried to unpack the result
    into two values even though ``generate_response`` is a generator.  We now
    pass an empty cid (so a fresh conversation id is created) and drain the
    generator, keeping only the final streamed state.

    Returns:
        (chat, updated_history): final (user, bot) pairs for gr.Chatbot and
        the flat alternating history list.
    """
    chat, updated_history = [], []
    for chat, updated_history, _cid in generate_response(example, "", []):
        pass
    return chat, updated_history
        
def generate_response(user_message, cid, history=None):
    """Stream a chat reply from Cohere's Command R+ model.

    Args:
        user_message: The user's latest message.
        cid: Conversation id; when empty or None a new UUID is generated so
            the Cohere API tracks per-conversation context.
        history: Flat list alternating [user, bot, user, bot, ...]; a fresh
            list is created when None (avoids the mutable-default pitfall).

    Yields:
        (chat, history, cid) on every stream event, where ``chat`` is the
        list of (user, bot) pairs gr.Chatbot expects, growing as tokens
        arrive.
    """
    if history is None:
        history = []
    # BUG FIX: original condition was `cid == "" or None`, which evaluates as
    # `(cid == "") or None` — the `or None` branch is always falsy, so a
    # cid of None was never replaced. `not cid` covers both "" and None.
    if not cid:
        cid = str(uuid.uuid4())

    history.append(user_message)

    # Prepend the custom instructions to the user's message.
    user_message_with_instructions = f"{CUSTOM_INSTRUCTIONS}\n\n{user_message}"

    stream = co.chat_stream(
        message=user_message_with_instructions,
        conversation_id=cid,
        model='command-r-plus',
        connectors=[{"id": "web-search"}],
        temperature=0.3,
    )

    output = ""
    # Pre-build `chat` so the final `return` cannot raise NameError if the
    # stream produces no events at all.
    chat = [
        (history[i].strip(), history[i + 1].strip())
        for i in range(0, len(history) - 1, 2)
    ]

    for idx, response in enumerate(stream):
        if response.event_type == "text-generation":
            output += response.text
        if idx == 0:
            # First event: reserve the bot slot in the flat history.  The
            # leading space is removed by .strip() when pairing below.
            history.append(" " + output)
        else:
            history[-1] = output
        chat = [
            (history[i].strip(), history[i + 1].strip())
            for i in range(0, len(history) - 1, 2)
        ]
        yield chat, history, cid

    return chat, history, cid
    

def clear_chat():
    """Reset the chat UI.

    Returns an empty chatbot transcript, an empty flat history, and a
    brand-new conversation id so the backend starts a fresh conversation.
    """
    fresh_cid = str(uuid.uuid4())
    return [], [], fresh_cid


# Canned prompts shown in the gr.Examples panel; deliberately multilingual
# (English, Portuguese, Spanish, Italian, French) to showcase the model.
examples = [
    "What are 8 good questions to get to know a stranger?",
    "Create a list of 10 unusual excuses people might use to get out of a work meeting",
    "Write a python code to reverse a string",
    "Explain the relativity theory in French",
    "Como sair de um helicóptero que caiu na água?",
    "Formally introduce the transformer architecture with notation.",
    "¿Cómo le explicarías el aprendizaje automático a un extraterrestre?",
    "Summarize recent news about the North American tech job market",
    "Explain gravity to a chicken.",
    "Is the world discrete or analog?",
    "What is the memory cost in a typical implementation of an all-gather operation?",
    "Give me a brief history of the golden era of Cantopop.",
    "Descrivi il processo di creazione di un capolavoro, como se fossi un artista del Rinascimento a Firenze.",
    "Explique-moi le sens de la vie selon un grand auteur littéraire.",
    "Give me an example of an endangered species and let me know what I can do to help preserve it"
]

# CSS injected into gr.Blocks: removes the border around the logo image and
# sizes the chat message area (element ids are set on the components below).
custom_css = """
#logo-img {
    border: none !important;
}
#chat-message {
    font-size: 14px;
    min-height: 300px;
}
"""

# UI layout and event wiring for the chat demo.
with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
    # Per-session conversation id; populated lazily by generate_response.
    cid = gr.State("")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Image("logoplus.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
        with gr.Column(scale=3):
            gr.Markdown("""C4AI Command R+ is a research open weights release of a 104B billion parameter with highly advanced Retrieval Augmented Generation (RAG) capabilities, tool Use to automate sophisticated tasks, and is multilingual in 10 languages: English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Arabic, and Chinese. Command R+ is optimized for a variety of use cases including reasoning, summarization, and question answering.
            <br/><br/>
            **Model**: [c4ai-command-r-plus](https://huggingface.co/CohereForAI/c4ai-command-r-plus)
            <br/> 
            **Developed by**: [Cohere](https://cohere.com/) and [Cohere for AI](https://cohere.com/research)
            <br/>
            **License**: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license), requires also adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy)
            """
            )
            
    with gr.Column():
        with gr.Row():
            chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)
        
        with gr.Row():
            user_message = gr.Textbox(lines=1, placeholder="Ask anything ...", label="Input", show_label=False)

      
        with gr.Row():
            submit_button = gr.Button("Submit")
            clear_button = gr.Button("Clear chat")

                        
        # Flat alternating [user, bot, ...] history shared across callbacks.
        history = gr.State([])
        
        user_message.submit(fn=generate_response, inputs=[user_message, cid, history], outputs=[chatbot, history, cid], concurrency_limit=32)

        submit_button.click(fn=generate_response, inputs=[user_message, cid, history], outputs=[chatbot, history, cid], concurrency_limit=32)
        clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history, cid], concurrency_limit=32)

        # BUG FIX: these handlers are registered with inputs=None, so Gradio
        # invokes them with ZERO arguments; the original `lambda x: ...`
        # raised TypeError whenever they fired.  Zero-arg lambdas clear the
        # textbox as intended.
        user_message.submit(lambda: gr.update(value=""), None, [user_message], queue=False)
        submit_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)
        clear_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)
        
        with gr.Row():
            gr.Examples(
                examples=examples,
                inputs=[user_message],
                cache_examples=False,
                fn=trigger_example,
                outputs=[chatbot],
                examples_per_page=100
            )

if __name__ == "__main__":
    # Enable request queueing (max 40 waiting, API endpoints disabled),
    # then start the web server without exposing the API docs.
    queued_app = demo.queue(api_open=False, max_size=40)
    queued_app.launch(show_api=False)