File size: 3,330 Bytes
fd63909
 
 
bd4b7a1
fd63909
 
 
 
 
 
 
 
 
 
bd4b7a1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fd63909
 
bd4b7a1
fd63909
bd4b7a1
 
 
fd63909
 
69e6326
c588ed2
bd4b7a1
 
 
 
69e6326
bd4b7a1
 
 
 
69e6326
fd63909
c588ed2
fd63909
69e6326
fd63909
 
bd4b7a1
fd63909
bd4b7a1
c588ed2
69e6326
bd4b7a1
fd63909
c736af8
bd4b7a1
c736af8
bd4b7a1
 
 
 
 
 
fd63909
 
bd4b7a1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import os
from openai import OpenAI
import logging
import re

# Module-wide logging setup: timestamped INFO-level messages.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)

# OpenAI-compatible client pointed at Groq's API endpoint.
# Reads GROQ_API_KEY from the environment (None if unset — requests will fail).
client = OpenAI(
    base_url="https://api.groq.com/openai/v1",
    api_key=os.environ.get("GROQ_API_KEY"),
)

def format_response_text(text):
    """Format model output as simple HTML for frontend presentation.

    Converts markdown-style bold to <strong>, newlines to <br>,
    normalizes list markers to '- ', and strips inline backticks.

    Args:
        text: Raw response text from the model.

    Returns:
        The HTML-formatted string.
    """
    # **bold** -> <strong>bold</strong>
    text = re.sub(r'\*\*([^*]+)\*\*', r'<strong>\1</strong>', text)

    # Convert line breaks to HTML breaks
    text = text.replace('\n', '<br>')

    # Standardize list markers (*, -, •) to '- ', but ONLY at the start of a
    # line (after ^ or a <br>) and only when followed by whitespace, so
    # hyphens inside words ("well-known", "3-4") are left untouched.
    text = re.sub(r'(^|<br>)\s*[*\-•]\s+', r'\1- ', text)

    # Clean up remaining markdown artifacts
    text = re.sub(r'`([^`]+)`', r'\1', text)  # strip inline-code backticks
    text = re.sub(r'_{2,}([^_]+)_{2,}', r'<strong>\1</strong>', text)  # __bold__ -> <strong>

    return text

def get_completion(prompt, context, contact_email="soufiane.sejjari@neologix.ma"):
    """Generate a website-assistant answer for *prompt* grounded in *context*.

    Args:
        prompt: The user's question.
        context: Retrieved site content the answer must be based on.
        contact_email: Address appended to every response.

    Returns:
        HTML-formatted response text, always ending with a contact line.
        On any API failure, a French apology message is returned instead
        of raising.
    """
    try:
        # Keep the context within the model's token budget: preserve the
        # head and tail, drop the middle.
        if len(context) > 8000:
            context = context[:4000] + "\n...[Content truncated]...\n" + context[-4000:]

        # NOTE: the original guideline list skipped item 7; renumbered here.
        system_message = (
            "You are an integrated website assistant representing yourself as part of a website, and your role is assisting users with their questions.\n"
            "Follow these guidelines:\n"
            "1. Keep responses concise and professional (max 2-3 paragraphs)\n"
            "2. Use the same language as the user's question\n"
            "3. Format lists with clear bullet points using '-'\n"
            "4. Use clear paragraph breaks for readability\n"
            "5. If the question is about yourself, respond as if you are the website owner\n"
            "6. Focus ONLY on information from the provided context\n"
            "7. Keep bullet point items short and clear\n"
            "8. End with a brief, professional closing statement\n"
            "9. Ensure proper spacing between paragraphs and list items\n"
            "10. Do not add any extra information or links"
        )

        response = client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[
                {"role": "system", "content": system_message},
                {"role": "user", "content": f"Context information:\n{context}\n\nQuestion: {prompt}"}
            ],
            max_tokens=500,  # shorter responses for better focus
            temperature=0.7,
            presence_penalty=0.2,
            frequency_penalty=0.5
        )

        # Convert the model's markdown-ish output to simple HTML.
        response_text = response.choices[0].message.content
        formatted_text = format_response_text(response_text)

        # Always close with the contact line (French, matching the site).
        return f"{formatted_text}<br>Pour plus d'informations, contactez-nous à {contact_email}"

    except Exception:
        # Broad catch at the API boundary: log with full traceback and
        # return a safe fallback rather than propagating to the caller.
        logging.exception("Error generating completion")
        return f"Désolé, je n'ai pas pu générer une réponse.<br><br>Contactez-nous à {contact_email}"