File size: 3,330 Bytes
b587436
 
ef9db33
b587436
 
 
 
 
 
ef9db33
b209e56
ef9db33
 
b587436
 
 
 
 
ef9db33
b587436
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ef9db33
 
 
 
 
 
 
b587436
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ef9db33
 
 
 
9f054c9
ef9db33
 
 
 
b587436
ef9db33
 
b587436
428f54a
 
 
 
 
 
89a1ee5
428f54a
 
 
 
 
 
 
d2fefc2
428f54a
 
b587436
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import os
import gradio as gr
import google.generativeai as genai
from gradio_client import Client, file
from dotenv import load_dotenv

# Load environment variables from a local .env file into os.environ
# (no-op if the file is absent).
load_dotenv()

# Gemini API key; None if the variable is unset — genai.configure would
# then be handed None and requests would fail at call time.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")

# System prompt / persona text injected as the model's system instruction.
# NOTE(review): also None if unset — presumably acceptable to the SDK; verify.
SYSTEM_CONTENT = os.getenv("SYSTEM_CONTENT")

# Configure the Google Gemini SDK with the key loaded above.
genai.configure(api_key=GEMINI_API_KEY)

# Decoding parameters shared by every generation request.
generation_config = {
    "temperature": 0.7,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 512,  # cap reply length; raise for longer answers
    "response_mime_type": "text/plain",
}

# Disable all four content filters — replies are never blocked by category.
safety_settings = [
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "BLOCK_NONE"
    }
]

# Build the model once at import time so every request reuses it.
model = genai.GenerativeModel(
    model_name="gemini-1.5-pro", 
    generation_config=generation_config,
    safety_settings=safety_settings,
    system_instruction=SYSTEM_CONTENT,
)

# Connect to the hosted voice-clone Space for TTS. Only ValueError is
# caught here; other failures (e.g. network errors) would still propagate.
# On failure the app runs in text-only mode (tts_client is None).
try:
    tts_client = Client("tonyassi/voice-clone")
except ValueError as e:
    print(f"Error initializing TTS client: {e}")
    tts_client = None

def generate_response(user_input, chat_history):
    """Generate a text reply (and, if available, TTS audio) for *user_input*.

    Args:
        user_input: The latest message typed by the user.
        chat_history: List of prior conversation lines (Gradio State).

    Returns:
        Tuple of (reply_text, tts_audio_or_None, updated_chat_history).
        The returned history now includes the model's reply as well, so
        later turns see both sides of the conversation (previously only
        user messages were retained, giving the model one-sided context).
    """

    # Add user input to history.
    chat_history.append(user_input)

    # Limit history length so the prompt stays bounded.
    if len(chat_history) > 10:
        chat_history = chat_history[-10:]

    # A fresh session is started each call; conversational context is
    # carried via the joined history string rather than SDK session state.
    chat_session = model.start_chat()

    # Send the entire chat history as a single message.
    response = chat_session.send_message("\n".join(chat_history))

    # Record the assistant's reply in the history (bug fix: the original
    # never stored replies), then re-apply the length cap.
    chat_history.append(response.text)
    if len(chat_history) > 10:
        chat_history = chat_history[-10:]

    tts_result = None
    if tts_client:
        # TTS is best-effort: a remote-API failure must not break the chat,
        # matching how client initialization is already treated as optional.
        try:
            tts_result = tts_client.predict(
                text=response.text,
                audio=file('audio.mp3'),  # local reference sample to clone
                api_name="/predict"
            )
        except Exception as e:
            print(f"TTS generation failed: {e}")

    # Return response text, audio (or None), and the updated chat history.
    return response.text, tts_result, chat_history



# Build the UI: a gr.Interface rendered inside a Blocks container.
# The two gr.State components thread chat_history through generate_response.
with gr.Blocks() as iface:
    gr.Interface(
        fn=generate_response,
        inputs=[
            gr.Textbox(lines=2, label="Chat with AI Donald Trump", placeholder="Enter your message here..."),
            gr.State([])  # State input for chat history
        ],
        outputs=[
            # If TTS is unavailable the audio slot is swapped for a textbox;
            # generate_response still returns None in that position.
            gr.Textbox(label="Response"),
            gr.Audio(label="Voice Output", interactive=False, autoplay=True) if tts_client else gr.Textbox(label="Voice Output not available"),
            gr.State([])  # State output to update chat history 
        ],
        title="AI Donald Trump",
        description="Contact me if you want another character/voice<br>WhatsApp me: +92-332-4399819<br> Email me: aheedsajid@gmail.com<br><b>Donate something to increase GPU power</b><br>[Click here to Donate](https://nowpayments.io/donation/aheed)<br>Please duplicate the space if you get an error!"
    )

# Start the Gradio server (blocking call).
iface.launch()