from huggingface_hub import InferenceClient
import gradio as gr

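# Client for the Hugging Face Inference API, pointed at the Mixtral-8x7B-Instruct model.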
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

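# Build a Mixtral-style prompt: the system context goes in the first [INST] block, followed by
# each (user, assistant) turn from the chat history and finally the new message, producing roughly:
#   <s>[INST] {system context} [/INST]</s>[INST] {user} [/INST] {bot}</s> [INST] {message} [/INST]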
def format_prompt(message, history):
    prompt = "<s>[INST] # Context: You are the marketing and sales expert. You will respond to the user who sends you the description of any product, with a list of 5 bullets that have the following conditions: ### Rules for answering: - Mention all the data and technical characteristics in FIVE (5) bullets. - Try to synthesize as much as you can and it is not necessary to say the name of the product. - omit any BENEFITS - you can join several characteristics in one bullet, but do not delete any characteristic. - Always use bullet points - Only 5 bullets. - Always answer in Spanish. [/INST]</s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

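# Stream a completion from the Inference API, yielding the partial text so the
# Gradio chat UI renders the answer as tokens arrive.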
def generate(
    prompt, history, temperature=0.2, max_new_tokens=16392, top_p=0.95, repetition_penalty=1.0,
):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2

    top_p = float(top_p)

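    # Sampling parameters forwarded to text_generation(); the fixed seed keeps runs reproducible.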
    generate_kwargs = dict(
        temperature=temperature, 
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

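    # Accumulate streamed tokens and yield the running text so the chat updates incrementally.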
    for response in stream:
        output += response.token.text
        yield output
    return output

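# Chat display component: custom avatars, copy button, and message "like" buttons enabled.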
mychatbot = gr.Chatbot(
    avatar_images=["./user.png", "./botm.png"],
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)

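# Wire the streaming generate() function into a ChatInterface; retry/undo buttons are disabled.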
demo = gr.ChatInterface(fn=generate,  
                        chatbot=mychatbot,
                        title="Bot con I.A. para crear CARACTERISTICAS de productos.",
                        description="<p style='line-height: 0.5'>Estas caracteristicas van en la descripcion corta de producto (En la parte de abajo).</p><br>"+
                        "  <a href='https://GOOGLE.COM'> DAVID TAIPEI |</a> "+
                        "  <a href='https://google.com'> CARACTERISTICAS DE PAGINA WEB</a>",
                        retry_btn=None,
                        undo_btn=None
                       )

# Launch the app; launch() returns the FastAPI app plus the local and share URLs.
app, local_url, share_url = demo.queue().launch(show_api=False)

# Get and display the URL (launch() blocks in a plain script, so this only prints after the
# server stops unless prevent_thread_lock=True is passed to launch()).
print("URL del chatbot:", local_url)