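"""Infinite Dungeon - a lightweight AI Dungeon Master for Hugging Face Spaces.

Downloads a quantized Qwen2.5-0.5B-Instruct GGUF model, runs it on CPU with
llama-cpp-python, and serves a Gradio chat UI. Falls back to canned replies
when the model is unavailable.
"""
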
from llama_cpp import Llama
import gradio as gr
import os
import requests
import time

# Local model path - Qwen2.5-0.5B-Instruct (small and fast)
MODEL_PATH = "qwen2.5-0.5b-instruct-q4_k_m.gguf"
MODEL_URL = "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct-GGUF/resolve/main/qwen2.5-0.5b-instruct-q4_k_m.gguf"

def download_model():
    """Scarica il modello se non esiste già"""
    if not os.path.exists(MODEL_PATH):
        print("📥 Downloading Qwen2.5-0.5B-Instruct model...")
        try:
            response = requests.get(MODEL_URL, stream=True, timeout=300)
            response.raise_for_status()
            
            total_size = int(response.headers.get('content-length', 0))
            downloaded = 0
            last_report = 0.0
            
            with open(MODEL_PATH, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
                        downloaded += len(chunk)
                        if total_size > 0:
                            progress = (downloaded / total_size) * 100
                            # Log in ~5% steps instead of once per 8 KB chunk
                            if progress - last_report >= 5:
                                print(f"📥 Download progress: {progress:.1f}%")
                                last_report = progress
            
            # Sanity-check that the download is plausibly complete
            if os.path.getsize(MODEL_PATH) < 100000:  # At least 100 KB
                print("❌ Downloaded file seems corrupted")
                os.remove(MODEL_PATH)
                return False
            
            print("✅ Model downloaded successfully!")
            return True
            
        except Exception as e:
            print(f"❌ Error downloading model: {e}")
            if os.path.exists(MODEL_PATH):
                os.remove(MODEL_PATH)  # Remove the corrupted partial file
            return False
    else:
        print("✅ Model already exists!")
        # Sanity-check the existing file
        if os.path.getsize(MODEL_PATH) < 100000:
            print("❌ Existing file seems corrupted, re-downloading...")
            os.remove(MODEL_PATH)
            return download_model()  # Retry the download
        return True

# Download the model at startup
model_loaded = download_model()
llm = None  # Will hold the Llama instance once loaded

if model_loaded:
    # Initialize the model, tuned for CPU-only inference with Qwen2.5-0.5B
    try:
        llm = Llama(
            model_path=MODEL_PATH,
            n_ctx=2048,  # Larger context window is affordable with the small model
            n_threads=4,  # The 0.5B model leaves headroom for more threads
            n_batch=256,  # Batch size tuned for CPU
            use_mlock=False,  # Disabled for the HF free tier
            verbose=False,
            n_gpu_layers=0,  # CPU-only inference
            use_mmap=True  # Memory-map the weights for efficiency
            # RoPE settings are left at the GGUF defaults: Qwen2.5 ships
            # rope_freq_base=1e6, so hardcoding 10000.0 would degrade output.
        )
        print("✅ Qwen2.5-0.5B Model loaded successfully!")
    except Exception as e:
        print(f"❌ Error loading model: {e}")
        llm = None
else:
    print("❌ Model not available, using fallback responses")

# System prompt in Qwen2.5's ChatML format
system_prompt = """<|im_start|>system
You are an expert D&D Dungeon Master. Create immersive, engaging adventures with vivid descriptions. Always end your responses with a question or choice for the player. Keep responses concise but atmospheric.
<|im_end|>"""

def generate_random_opening():
    """Genera un inizio casuale per l'avventura usando l'AI"""
    if llm is None:
        # Fallback solo se il modello non è disponibile
        import random
        openings = [
            "You enter a torch-lit dungeon. Water drips from ancient stones. A passage splits left and right. Which way?",
            "You're in a misty forest clearing. An old well sits in the center, rope disappearing into darkness. Investigate?",
            "The tavern door creaks open. Hooded figures look up from their ale. The barkeep waves you over. Approach?"
        ]
        return f"🌟 **New Adventure!** 🌟\n\n{random.choice(openings)}"
    
    try:
        # ChatML-formatted prompt for Qwen2.5
        opening_prompt = f"""{system_prompt}
<|im_start|>user
Generate a creative D&D adventure opening in 2-3 sentences. Set an intriguing scene and end with a question for the player.
<|im_end|>
<|im_start|>assistant
"""
        
        output = llm(
            opening_prompt,
            max_tokens=80,  # Slightly higher cap for a richer opening
            temperature=0.8,
            top_p=0.9,
            repeat_penalty=1.1,
            stop=["<|im_end|>", "<|im_start|>", "User:", "Player:"]
        )
        
        opening = output["choices"][0]["text"].strip()
        
        # Make sure the opening ends with a question
        if not opening.endswith('?'):
            opening += " What do you do?"
        
        return f"🌟 **New Adventure!** 🌟\n\n{opening}"
        
    except Exception as e:
        print(f"Error generating opening: {e}")
        return f"🌟 **New Adventure!** 🌟\n\nYou find yourself in a mysterious place. Strange things are happening. What do you do?"

# Rolling conversation history. Note: this is one global list shared by all
# concurrent users of the app, not per-session state.
chat_history = []

def generate_dm_response_with_timeout(message, timeout=30):
    """Generate a DM response; the timeout is checked after generation,
    so an overly slow reply is discarded rather than interrupted."""
    if llm is None:
        # Canned fallback responses when the model is unavailable
        import random
        fallbacks = [
            "The path ahead is unclear. What's your next move?",
            "You hear footsteps approaching. How do you react?",
            "A mysterious door appears before you. Do you open it?",
            "The ground trembles slightly. What do you do?",
            "You find a strange artifact. Examine it closely?"
        ]
        return random.choice(fallbacks)
    
    try:
        # Build a ChatML prompt from the system prompt and recent history
        prompt = f"{system_prompt}\n"
        
        # The efficient model makes a longer context affordable
        context_turns = min(len(chat_history), 3)  # Last 3 turns
        for turn in chat_history[-context_turns:]:
            prompt += f"<|im_start|>user\n{turn['user']}\n<|im_end|>\n"
            prompt += f"<|im_start|>assistant\n{turn['ai']}\n<|im_end|>\n"
        
        prompt += f"<|im_start|>user\n{message}\n<|im_end|>\n<|im_start|>assistant\n"
        
        # Sampling parameters tuned for Qwen2.5-0.5B
        start_time = time.time()
        output = llm(
            prompt, 
            max_tokens=100,  # Higher cap for better response quality
            stop=["<|im_end|>", "<|im_start|>", "User:", "Player:"],
            temperature=0.7,
            top_p=0.8,
            repeat_penalty=1.2,
            top_k=40,
            min_p=0.1  # Drop very low-probability tokens
        )
        
        # Post-hoc check: discard the reply if generation took too long
        elapsed_time = time.time() - start_time
        if elapsed_time > timeout:
            print(f"Response took {elapsed_time:.1f}s (timeout: {timeout}s)")
            return "Time passes quickly. What do you do next?"
        
        text = output["choices"][0]["text"].strip()
        
        # Ensure the reply ends with punctuation (default to a question mark)
        if not text.endswith(('?', '!', '.')):
            text += "?"
        
        print(f"✅ Response generated in {elapsed_time:.1f}s")
        return text
        
    except Exception as e:
        print(f"Error generating response: {e}")
        return "Something unexpected happens. What do you do next?"

def chat(message, history):
    # The UI history argument is unused; state lives in the global chat_history
    global chat_history
    
    if not message.strip():
        return "You stand there, unsure. What would you like to do?"
    
    # Generate the DM response (slow replies fall back to a stock line)
    dm_response = generate_dm_response_with_timeout(message)
    
    # Update the rolling history; the efficient model lets us keep more turns
    chat_history.append({"user": message, "ai": dm_response})
    if len(chat_history) > 5:  # Keep the last 5 turns
        chat_history = chat_history[-5:]
    
    return dm_response

def reset():
    global chat_history
    chat_history = []
    return generate_random_opening()

# Build the Gradio interface
with gr.Blocks(title="Infinite Dungeon - Lightning Fast", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# ⚡ Infinite Dungeon - Lightning Fast")
    gr.Markdown("*Powered by Qwen2.5-0.5B - Optimized for 5-15 second responses*")
    gr.Markdown("🚀 **Super fast AI D&D with perfect memory retention**")
    
    # Seed the chat with a welcome message
    chatbot = gr.Chatbot(
        value=[(None, "⚡ **Lightning Fast Adventure Ready!** ⚡\n\nPress 'New Adventure' to begin your quest!")],
        height=400,
        show_label=False
    )
    
    msg = gr.Textbox(
        label="Your action", 
        placeholder="What do you do? (e.g., 'I search the room', 'I attack the orc', 'I cast a spell')",
        max_lines=2
    )
    
    with gr.Row():
        submit = gr.Button("⚔️ Act", variant="primary", size="lg")
        reset_btn = gr.Button("🔄 New Adventure", variant="secondary")
    
    gr.Markdown("⚡ **Ultra-fast responses**: 5-15 seconds | 🧠 **Perfect memory**: Never forgets your adventure!")
    
    # Chat handler. Written as a generator so Gradio can actually display the
    # "thinking" placeholder before the (slow) model call finishes; a plain
    # function would return only once and the placeholder would never show.
    def respond(message, chat_history_ui):
        if not message.strip():
            yield "", chat_history_ui
            return
        
        # Show a placeholder while the response is generated
        chat_history_ui.append((message, "🎲 *The DM is thinking...*"))
        yield "", chat_history_ui
        
        # Replace the placeholder with the real response
        bot_message = chat(message, chat_history_ui)
        chat_history_ui[-1] = (message, bot_message)
        yield "", chat_history_ui
    
    # Reset handler: clear state and start a fresh adventure
    def reset_chat():
        new_opening = reset()
        return [(None, new_opening)]
    
    # Wire up the events
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    submit.click(respond, [msg, chatbot], [msg, chatbot])
    reset_btn.click(reset_chat, outputs=[chatbot])

# Launch the app
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )
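
# Local usage (assuming this file is saved as app.py, the usual name on Spaces):
#   pip install llama-cpp-python gradio requests
#   python app.py
# The UI is then served at http://localhost:7860.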