RockSky1 committed on
Commit
3877120
·
verified ·
1 Parent(s): 77a0783

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -40
app.py CHANGED
@@ -2,36 +2,32 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import random
4
 
5
- # Identity & Brand
6
- NAME = "Infinity"
7
- DESCRIPTION = "Next-gen Multimodal Reasoning & Creative Engine"
8
-
9
- # Connecting to Free Powerful Cloud Models
10
- # Gemma 4 for Logic/Chat & FLUX for Images
11
- client_chat = InferenceClient("google/gemma-4-2b-it")
12
- client_image = InferenceClient("black-forest-labs/FLUX.1-schnell")
13
 
14
  def infinity_engine(message, history):
15
  user_msg = message.lower()
16
 
17
- # --- STEP 1: IMAGE GENERATION LOGIC ---
18
- image_triggers = ["generate", "make", "create", "draw", "photo", "image", "banao", "paint"]
19
  if any(word in user_msg for word in image_triggers):
20
- yield "Infinity is painting your imagination... 🎨"
21
  try:
 
22
  seed = random.randint(0, 1000000)
23
  img = client_image.text_to_image(message, seed=seed)
24
  yield img
25
  return
26
- except Exception as e:
27
- yield f"Error: {str(e)}. Try again in a moment."
28
  return
29
 
30
- # --- STEP 2: SMART CHAT LOGIC (Gemma 4) ---
31
- system_prompt = f"System: Your name is {NAME}. You are an advanced AI Agent created by RockSky1. You are logical, fast, and multimodal. Never mention Google or Gemma. Identify only as {NAME}."
32
-
33
  messages = [{"role": "system", "content": system_prompt}]
34
- # Adding history for memory
35
  for val in history:
36
  if val[0]: messages.append({"role": "user", "content": val[0]})
37
  if val[1]: messages.append({"role": "assistant", "content": val[1]})
@@ -39,29 +35,16 @@ def infinity_engine(message, history):
39
 
40
  response = ""
41
  try:
42
- for token in client_chat.chat_completion(messages, max_tokens=1000, stream=True):
43
- token_str = token.choices[0].delta.content
44
- response += token_str
45
- yield response
46
- except Exception as e:
47
- yield "Infinity is currently thinking deeply. Please retry."
48
 
49
- # --- STEP 3: PREMIUM UI DESIGN ---
50
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo", secondary_hue="blue")) as demo:
51
- gr.Markdown(f"# ♾️ {NAME} AI")
52
- gr.Markdown(f"*{DESCRIPTION}*")
53
-
54
- chat_ui = gr.ChatInterface(
55
- fn=infinity_engine,
56
- chatbot=gr.Chatbot(height=500, avatar_images=[None, "https://huggingface.co/front/assets/huggingface_logo-noborder.svg"]),
57
- title=f"{NAME} v2.0",
58
- examples=[
59
- "Who are you?",
60
- "Generate a futuristic 3D avatar of a gamer",
61
- "Write a Python script for a discord bot",
62
- "What is the future of Multimodal AI?"
63
- ],
64
- cache_examples=False,
65
- )
66
 
67
  demo.launch()
 
from huggingface_hub import InferenceClient
import random

# Two separate model endpoints for stability: if one backend is overloaded,
# the other keeps part of the app usable.
# Chat: Mistral 7B Instruct — reliable on the free inference tier.
client_chat = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
# Images: Stable Diffusion XL — fallback choice for when FLUX endpoints are busy.
client_image = InferenceClient("stabilityai/stable-diffusion-xl-base-1.0")
 
11
def infinity_engine(message, history):
    """Chat handler: routes image requests to SDXL, everything else to Mistral.

    Args:
        message: The user's latest message (plain text).
        history: Prior turns as (user, assistant) pairs; either slot may be
            empty/None on a partially completed turn.

    Yields:
        A status string, then either a generated image (for image requests)
        or the model's text reply; a friendly retry message on failure.
    """
    user_msg = message.lower()

    # CASE 1: Image generation — detected by simple keyword matching.
    image_triggers = ["generate", "make", "create", "draw", "photo", "image", "banao"]
    if any(word in user_msg for word in image_triggers):
        yield "Infinity is painting... 🎨"
        try:
            # Random seed so repeated prompts produce a fresh image each time.
            seed = random.randint(0, 1000000)
            img = client_image.text_to_image(message, seed=seed)
            yield img
            return
        except Exception:
            # Best-effort: the free image endpoint is often busy; ask the
            # user to retry instead of crashing the chat session.
            yield "Infinity: Image server busy hai, 10 second baad phir try karein."
            return

    # CASE 2: Text chat.
    system_prompt = "You are Infinity, a powerful AI by RockSky1. Be cool and smart."
    messages = [{"role": "system", "content": system_prompt}]
    # Replay prior turns so the model has conversational memory.
    for user_turn, bot_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if bot_turn:
            messages.append({"role": "assistant", "content": bot_turn})
    # Append the current message last — without this the model only ever
    # sees the history and never the new question.
    messages.append({"role": "user", "content": message})

    try:
        # Non-streaming call (stream defaults to False) so a flaky free-tier
        # connection cannot break mid-reply.
        result = client_chat.chat_completion(messages, max_tokens=500)
        yield result.choices[0].message.content
    except Exception:
        # Top-level boundary: surface a friendly retry message, not a traceback.
        yield "Infinity: Connection thoda slow hai, ek baar phir se message bhejo bhai."
45
# --- UI setup -------------------------------------------------------------
# Minimal Gradio Blocks layout: a heading plus the default ChatInterface
# wired to the generator above.
with gr.Blocks(theme=gr.themes.Default()) as demo:
    gr.Markdown("# ♾️ INFINITY AI")
    chat = gr.ChatInterface(fn=infinity_engine)

demo.launch()