teszenofficial committed · commit f83d491 · verified · 1 parent: 4bb291f

Create app.py

Files changed (1):
  1. app.py +575 -0
app.py ADDED
@@ -0,0 +1,575 @@
# -*- coding: utf-8 -*-
"""
MTP 1.0 API - COMPLETE RESPONSES
- No artificial truncation
- The model decides when to stop
- Natural, coherent responses
- At most 200 new tokens (enough for complete responses)
"""

import os
import sys
import torch
import json
import time
import gc
import re
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from huggingface_hub import snapshot_download
import uvicorn
import math
import torch.nn as nn
import torch.nn.functional as F
import sentencepiece as spm

# ======================
# OPTIMIZATIONS
# ======================
if torch.cuda.is_available():
    DEVICE = "cuda"
    print("✅ GPU detected")
else:
    DEVICE = "cpu"
    torch.set_num_threads(min(2, os.cpu_count() or 2))
    torch.set_num_interop_threads(1)
    torch.set_grad_enabled(False)
    print("⚠️ Using optimized CPU settings")

MODEL_REPO = "TeszenAI/dango"

# ======================
# MTP 1.0 ARCHITECTURE
# ======================
class RMSNorm(nn.Module):
    __slots__ = ('weight', 'eps')
    def __init__(self, d_model, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(d_model))
        self.eps = eps
    def forward(self, x):
        rms = torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return self.weight * (x / rms)
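
# Worked micro-example (illustrative numbers, not from the original source):
# for x = [3.0, 4.0], rms = sqrt((9 + 16) / 2) ≈ 3.54, so the output is
# weight * [0.85, 1.13]; unlike LayerNorm there is no mean-centering and no bias.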

class SwiGLU(nn.Module):
    __slots__ = ('w1', 'w2', 'w3')
    def __init__(self, d_model, d_ff):
        super().__init__()
        self.w1 = nn.Linear(d_model, d_ff, bias=False)
        self.w2 = nn.Linear(d_ff, d_model, bias=False)
        self.w3 = nn.Linear(d_model, d_ff, bias=False)
    def forward(self, x):
        return self.w2(F.silu(self.w1(x)) * self.w3(x))
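
# SwiGLU computes w2(silu(w1·x) ⊙ w3·x): w1 produces the gate, w3 the value,
# and their elementwise product is projected back to d_model by w2.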

class RotaryPositionalEmbedding(nn.Module):
    __slots__ = ('inv_freq',)
    def __init__(self, d_model, max_len=512):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, d_model, 2).float() / d_model))
        self.register_buffer('inv_freq', inv_freq)
    def forward(self, x, seq_len=None):
        if seq_len is None:
            seq_len = x.shape[1]
        t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        return torch.cos(emb), torch.sin(emb)
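
# The buffer holds one inverse frequency per channel pair, decaying from 1
# down to roughly 1/10000 across the head dimension; forward() returns the
# cos/sin tables the attention layer uses to rotate Q and K.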

class RotaryMultiHeadAttention(nn.Module):
    __slots__ = ('n_heads', 'd_k', 'w_q', 'w_k', 'w_v', 'w_o', 'dropout', 'scale', 'rotary')
    def __init__(self, d_model, n_heads, dropout=0.1):
        super().__init__()
        assert d_model % n_heads == 0
        self.n_heads = n_heads
        self.d_k = d_model // n_heads
        self.w_q = nn.Linear(d_model, d_model, bias=False)
        self.w_k = nn.Linear(d_model, d_model, bias=False)
        self.w_v = nn.Linear(d_model, d_model, bias=False)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)
        self.scale = math.sqrt(self.d_k)
        self.rotary = RotaryPositionalEmbedding(self.d_k)
    def forward(self, x, mask=None):
        b, s, _ = x.shape
        cos, sin = self.rotary(x, s)
        Q = self.w_q(x).view(b, s, self.n_heads, self.d_k).transpose(1, 2)
        K = self.w_k(x).view(b, s, self.n_heads, self.d_k).transpose(1, 2)
        V = self.w_v(x).view(b, s, self.n_heads, self.d_k).transpose(1, 2)
        Q_rot = Q * cos.unsqueeze(0).unsqueeze(0) + self._rotate_half(Q) * sin.unsqueeze(0).unsqueeze(0)
        K_rot = K * cos.unsqueeze(0).unsqueeze(0) + self._rotate_half(K) * sin.unsqueeze(0).unsqueeze(0)
        scores = torch.matmul(Q_rot, K_rot.transpose(-2, -1)) / self.scale
        if mask is not None:
            scores = scores.masked_fill(mask == 0, float('-inf'))
        attn = self.dropout(F.softmax(scores, dim=-1))
        out = torch.matmul(attn, V).transpose(1, 2).contiguous().view(b, s, -1)
        return self.w_o(out)
    def _rotate_half(self, x):
        x1, x2 = x.chunk(2, dim=-1)
        return torch.cat((-x2, x1), dim=-1)
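
# _rotate_half maps (x1, x2) -> (-x2, x1), so Q * cos + rotate_half(Q) * sin
# applies the RoPE rotation to each channel pair before the dot product.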

class TransformerBlock(nn.Module):
    __slots__ = ('attn', 'ff', 'norm1', 'norm2', 'dropout1', 'dropout2')
    def __init__(self, d_model, n_heads, d_ff, dropout=0.1):
        super().__init__()
        self.attn = RotaryMultiHeadAttention(d_model, n_heads, dropout)
        self.ff = SwiGLU(d_model, d_ff)
        self.norm1 = RMSNorm(d_model)
        self.norm2 = RMSNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
    def forward(self, x, mask=None):
        x = x + self.dropout1(self.attn(self.norm1(x), mask))
        x = x + self.dropout2(self.ff(self.norm2(x)))
        return x

class MTP1Model(nn.Module):
    def __init__(self, vocab_size, d_model=512, n_heads=16, n_layers=8, d_ff=2048, dropout=0.1, max_len=512):
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.max_len = max_len
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.blocks = nn.ModuleList([TransformerBlock(d_model, n_heads, d_ff, dropout) for _ in range(n_layers)])
        self.norm = RMSNorm(d_model)
        self.lm_head = nn.Linear(d_model, vocab_size)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        seq_len = x.size(1)
        mask = torch.tril(torch.ones(seq_len, seq_len)).unsqueeze(0).unsqueeze(0).to(x.device)
        x = self.embedding(x) * math.sqrt(self.d_model)
        x = self.dropout(x)
        for block in self.blocks:
            x = block(x, mask)
        return self.lm_head(self.norm(x))
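
    # Causal mask sketch for seq_len = 3 (1 = attend, 0 = masked): position i
    # can only attend to positions j <= i.
    #   [[1, 0, 0],
    #    [1, 1, 0],
    #    [1, 1, 1]]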

    @torch.no_grad()
    def generate(self, input_ids, max_new=200, temperature=0.45, top_k=30, top_p=0.88, repetition_penalty=1.2):
        """Generation without artificial truncation - the model decides when to stop"""
        generated = input_ids
        eos_id = 3
        last_tokens = []

        for step in range(max_new):
            # Keep the context within the model's maximum sequence length
            if generated.size(1) > self.max_len:
                context = generated[:, -self.max_len:]
            else:
                context = generated
            logits = self(context)
            next_logits = logits[0, -1, :].clone() / temperature

            # CTRL-style repetition penalty: plain division would *boost*
            # already-seen tokens whose logits are negative, so shrink positive
            # logits and push negative ones further down
            if repetition_penalty != 1.0:
                for token_id in set(generated[0].tolist()):
                    if next_logits[token_id] > 0:
                        next_logits[token_id] /= repetition_penalty
                    else:
                        next_logits[token_id] *= repetition_penalty

            if top_k > 0:
                indices = next_logits < torch.topk(next_logits, top_k)[0][..., -1, None]
                next_logits[indices] = float('-inf')
            if top_p < 1.0:
                sorted_logits, sorted_indices = torch.sort(next_logits, descending=True)
                cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
                remove = cum_probs > top_p
                remove[..., 1:] = remove[..., :-1].clone()
                remove[..., 0] = 0
                indices = sorted_indices[remove]
                next_logits[indices] = float('-inf')

            probs = F.softmax(next_logits, dim=-1)
            next_token = torch.multinomial(probs, 1).item()

            # Loop detection over a sliding window of recent tokens (a check
            # against the full history would almost never fire)
            last_tokens = (last_tokens + [next_token])[-8:]
            if len(last_tokens) > 6 and len(set(last_tokens)) <= 2:
                break

            # Natural stop: EOS (id 3) or padding (id 0) ends the response,
            # and neither is ever appended to the output
            if next_token == eos_id or next_token == 0:
                break

            generated = torch.cat([generated, torch.tensor([[next_token]], device=generated.device)], dim=1)

        return generated
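
# Nucleus (top-p) sketch with illustrative numbers: for sorted probabilities
# [0.5, 0.3, 0.15, 0.05] and top_p = 0.88, the cumulative sums are
# [0.5, 0.8, 0.95, 1.0]; after the shift-by-one above, only the last token is
# dropped, so sampling is restricted to the first three candidates.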

# ======================
# MINIMAL CLEANUP (ESSENTIALS ONLY)
# ======================
def clean_response(response: str) -> str:
    """Only removes repetitions and extra whitespace; does NOT truncate the text"""
    if not response:
        return ""

    # Collapse immediate word repetitions
    words = response.split()
    cleaned = []
    last = ""
    for w in words:
        if w.lower() != last.lower():
            cleaned.append(w)
        last = w
    response = " ".join(cleaned)

    # Collapse runs of whitespace
    response = re.sub(r'\s+', ' ', response).strip()

    # Capitalize the first letter
    if response and response[0].islower():
        response = response[0].upper() + response[1:]

    # The text is NOT cut off - the response stays complete
    return response
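
# Example: clean_response("hola hola   mundo") returns "Hola mundo" - the
# adjacent duplicate is collapsed, whitespace is normalized, and the first
# letter is capitalized.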

# ======================
# MODEL LOADING
# ======================
print(f"📦 Downloading MTP 1.0 from {MODEL_REPO}...")
repo_path = snapshot_download(repo_id=MODEL_REPO, repo_type="model", local_dir="mtp_repo")

config_path = os.path.join(repo_path, "config.json")
with open(config_path, "r") as f:
    config = json.load(f)

tokenizer_path = os.path.join(repo_path, "mtp_tokenizer.model")
sp = spm.SentencePieceProcessor()
sp.load(tokenizer_path)
config["vocab_size"] = sp.get_piece_size()

print("🧠 Initializing MTP 1.0...")
print(f"   → Vocabulary: {config['vocab_size']} tokens")
print(f"   → Dimensions: {config.get('d_model', 512)}")
print(f"   → Layers: {config.get('n_layers', 8)}")

model = MTP1Model(**config)
model.to(DEVICE)
model.eval()

model_path = os.path.join(repo_path, "mtp_model.pt")
if os.path.exists(model_path):
    state_dict = torch.load(model_path, map_location=DEVICE)
    model.load_state_dict(state_dict, strict=False)
    print("✅ Weights loaded")

param_count = sum(p.numel() for p in model.parameters())
print(f"✅ MTP 1.0 ready: {param_count:,} parameters ({param_count/1e6:.2f}M)")

# ======================
# API
# ======================
app = FastAPI(title="MTP 1.0 API", version="1.0")
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])

class PromptRequest(BaseModel):
    text: str = Field(..., max_length=2000)

def build_prompt(user_input: str) -> str:
    # The template stays in Spanish: it presumably has to match the
    # instruction format the model was fine-tuned on
    return f"### Instrucción:\n{user_input}\n\n### Respuesta:\n"

ACTIVE_REQUESTS = 0
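
# Example: build_prompt("¿Qué es Python?") produces
#   "### Instrucción:\n¿Qué es Python?\n\n### Respuesta:\n"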

@app.post("/generate")
async def generate(req: PromptRequest):
    global ACTIVE_REQUESTS
    ACTIVE_REQUESTS += 1

    user_input = req.text.strip()
    if not user_input:
        ACTIVE_REQUESTS -= 1
        return {"reply": ""}

    # Cap the prompt at 400 tokens
    tokens = sp.encode(build_prompt(user_input))[:400]
    input_ids = torch.tensor([tokens], device=DEVICE)

    try:
        start = time.time()
        output_ids = model.generate(
            input_ids,
            max_new=200,
            temperature=0.45,
            top_k=30,
            top_p=0.88,
            repetition_penalty=1.2
        )
        elapsed = time.time() - start

        gen_tokens = output_ids[0, len(tokens):].tolist()
        safe_tokens = [t for t in gen_tokens if 0 <= t < config["vocab_size"] and t != 0]

        response = sp.decode(safe_tokens).strip() if safe_tokens else ""

        # Strip leaked prompt-format markers
        for m in ["### Respuesta:", "Respuesta:", "[/INST]", "Asistente:"]:
            if m in response:
                response = response.split(m)[-1].strip()
                break

        response = clean_response(response)

        if len(response) < 3:
            response = "Sorry, I could not generate a clear response."

        return {
            "reply": response,
            "time": round(elapsed, 2),
            "tokens": len(safe_tokens),
            "characters": len(response),
            "model": "MTP-1.0"
        }

    except Exception as e:
        print(f"Error: {e}")
        return {"reply": "Sorry, an error occurred."}

    finally:
        ACTIVE_REQUESTS -= 1
        if DEVICE == "cuda":
            torch.cuda.empty_cache()
        gc.collect()
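
# Example request (hypothetical prompt, default port 7860):
#   curl -X POST http://localhost:7860/generate \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Hola, ¿cómo estás?"}'
# The reply is JSON with keys "reply", "time", "tokens", "characters", "model".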

@app.get("/health")
def health():
    return {"status": "ok", "model": "MTP-1.0", "device": DEVICE}

@app.get("/info")
def info():
    return {
        "model": "MTP-1.0",
        "version": "1.0",
        "parameters": param_count,
        "parameters_millions": round(param_count / 1e6, 2),
        "device": DEVICE,
        "d_model": config.get('d_model', 512),
        "n_layers": config.get('n_layers', 8),
        "n_heads": config.get('n_heads', 16)
    }

# ======================
# WEB INTERFACE
# ======================
@app.get("/", response_class=HTMLResponse)
def chat_ui():
    return """
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>MTP 1.0 - AI Assistant</title>
    <style>
        * { margin: 0; padding: 0; box-sizing: border-box; }
        body {
            background: linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 100%);
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            height: 100vh;
            display: flex;
            flex-direction: column;
        }
        .header {
            padding: 12px 16px;
            background: rgba(0,0,0,0.5);
            border-bottom: 1px solid rgba(255,255,255,0.1);
        }
        .header h1 { color: white; font-size: 1rem; }
        .header p { color: #888; font-size: 0.65rem; margin-top: 2px; }
        .messages {
            flex: 1;
            overflow-y: auto;
            padding: 16px;
            display: flex;
            flex-direction: column;
            gap: 10px;
        }
        .message {
            max-width: 85%;
            padding: 8px 14px;
            border-radius: 16px;
            font-size: 0.85rem;
            line-height: 1.4;
            animation: fadeIn 0.2s ease;
            word-wrap: break-word;
        }
        @keyframes fadeIn {
            from { opacity: 0; transform: translateY(5px); }
            to { opacity: 1; transform: translateY(0); }
        }
        .user {
            background: linear-gradient(135deg, #4a9eff, #3a7ecc);
            color: white;
            align-self: flex-end;
            border-radius: 16px 4px 16px 16px;
        }
        .bot {
            background: rgba(30, 31, 40, 0.95);
            color: #e0e0e0;
            align-self: flex-start;
            border-radius: 4px 16px 16px 16px;
            border: 1px solid rgba(255,255,255,0.05);
        }
        .input-area {
            padding: 12px 16px;
            background: rgba(0,0,0,0.5);
            border-top: 1px solid rgba(255,255,255,0.1);
            display: flex;
            gap: 10px;
        }
        input {
            flex: 1;
            padding: 10px 14px;
            background: rgba(255,255,255,0.1);
            border: 1px solid rgba(255,255,255,0.2);
            border-radius: 24px;
            color: white;
            font-size: 0.85rem;
            outline: none;
        }
        input:focus { border-color: #4a9eff; }
        input::placeholder { color: #666; }
        button {
            padding: 10px 20px;
            background: linear-gradient(135deg, #4a9eff, #3a7ecc);
            border: none;
            border-radius: 24px;
            color: white;
            font-weight: 500;
            cursor: pointer;
            font-size: 0.85rem;
        }
        button:hover { opacity: 0.9; }
        button:disabled { opacity: 0.5; cursor: not-allowed; }
        .typing {
            background: rgba(30, 31, 40, 0.95);
            padding: 8px 14px;
            border-radius: 16px;
            align-self: flex-start;
            display: flex;
            gap: 4px;
        }
        .typing span {
            width: 6px;
            height: 6px;
            background: #888;
            border-radius: 50%;
            animation: bounce 1.4s infinite;
        }
        @keyframes bounce {
            0%, 80%, 100% { transform: scale(0); }
            40% { transform: scale(1); }
        }
        .badge {
            position: fixed;
            bottom: 5px;
            right: 5px;
            font-size: 0.55rem;
            color: #555;
            background: rgba(0,0,0,0.5);
            padding: 2px 6px;
            border-radius: 10px;
        }
        @media (max-width: 600px) {
            .message { max-width: 95%; }
        }
    </style>
</head>
<body>
    <div class="header">
        <h1>🤖 MTP 1.0 - AI Assistant</h1>
        <p>✨ Complete, natural responses | No truncation | Smart</p>
    </div>
    <div class="messages" id="messages">
        <div class="message bot">✨ Hi, I'm MTP 1.0. I give complete, natural responses with no artificial truncation. How can I help you?</div>
    </div>
    <div class="input-area">
        <input type="text" id="input" placeholder="Type your question..." autocomplete="off">
        <button id="send">Send</button>
    </div>
    <div class="badge">⚡ MTP 1.0 | 🌡️ 0.45 | Complete responses</div>
    <script>
        const messages = document.getElementById('messages');
        const input = document.getElementById('input');
        const sendBtn = document.getElementById('send');
        let loading = false;

        function addMessage(text, isUser, time = null, chars = null) {
            const div = document.createElement('div');
            div.className = `message ${isUser ? 'user' : 'bot'}`;
            let info = '';
            if (time) info += `⚡ ${time}s`;
            if (chars) info += `${info ? ' | ' : ''}📝 ${chars} chars`;
            div.innerHTML = `<div>${escapeHtml(text)}</div>${info ? `<div style="font-size:0.6rem;color:#666;margin-top:4px;">${info}</div>` : ''}`;
            messages.appendChild(div);
            messages.scrollTop = messages.scrollHeight;
        }

        function escapeHtml(text) {
            const div = document.createElement('div');
            div.textContent = text;
            return div.innerHTML;
        }

        function showTyping() {
            const div = document.createElement('div');
            div.className = 'typing';
            div.id = 'typing';
            div.innerHTML = '<span></span><span></span><span></span>';
            messages.appendChild(div);
            messages.scrollTop = messages.scrollHeight;
        }

        function hideTyping() {
            const el = document.getElementById('typing');
            if (el) el.remove();
        }

        async function sendMessage() {
            const text = input.value.trim();
            if (!text || loading) return;

            input.value = '';
            addMessage(text, true);
            loading = true;
            sendBtn.disabled = true;
            showTyping();

            try {
                const response = await fetch('/generate', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({ text: text })
                });
                const data = await response.json();
                hideTyping();
                addMessage(data.reply, false, data.time, data.characters);
            } catch (error) {
                hideTyping();
                addMessage('⚠️ Connection error. Please try again.', false);
            } finally {
                loading = false;
                sendBtn.disabled = false;
                input.focus();
            }
        }

        input.addEventListener('keypress', (e) => { if (e.key === 'Enter') sendMessage(); });
        sendBtn.addEventListener('click', sendMessage);
        input.focus();
    </script>
</body>
</html>
"""

if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    print("\n" + "=" * 60)
    print(f"🚀 MTP 1.0 at http://0.0.0.0:{port}")
    print(f"📊 Parameters: {param_count:,} ({param_count/1e6:.2f}M)")
    print("🌡️ Temperature: 0.45 | 🔁 Repetition penalty: 1.2")
    print("💡 Complete responses - the model decides when to stop")
    print(f"💻 Device: {DEVICE.upper()}")
    print("=" * 60)

    uvicorn.run(app, host="0.0.0.0", port=port, log_level="warning")
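
A minimal client sketch for the API above (assumptions for illustration: the server is running locally on the default port 7860 and the `requests` package is installed; none of this is part of app.py):

    import requests

    # POST a prompt to /generate and print the decoded reply
    resp = requests.post(
        "http://localhost:7860/generate",
        json={"text": "Explica qué es un transformer."},
        timeout=120,
    )
    data = resp.json()
    print(data["reply"])           # cleaned model response
    print(data.get("time"), "s")   # server-side generation time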