euIaxs22 committed
Commit 153afc8 · verified · 1 Parent(s): cea5ee8

Upload 7 files

Files changed (7)
  1. Dockerfile +119 -0
  2. README.md +208 -6
  3. app.py +212 -0
  4. builder.sh +351 -0
  5. info.sh +154 -0
  6. requirements.txt +50 -0
  7. start.sh +97 -0
Dockerfile ADDED
@@ -0,0 +1,119 @@
+ # =============================================================================
+ # ADUC-SDR Video Suite — High-Perf Diffusers for 8× L40S (SM 8.9)
+ # CUDA 12.8 | PyTorch 2.8.0+cu128 | Ubuntu 22.04
+ # =============================================================================
+ FROM nvidia/cuda:12.8.0-devel-ubuntu22.04
+
+ LABEL maintainer="Carlos Rodrigues dos Santos & Development Partner"
+ LABEL description="High-performance Diffusers stack with FA2/SDPA, 8×L40S"
+ LABEL version="4.4.0"
+ LABEL cuda_version="12.8.0"
+ LABEL python_version="3.10"
+ LABEL pytorch_version="2.8.0+cu128"
+ LABEL gpu_optimized_for="8x_NVIDIA_L40S"
+
+ # ---------------- Core env & caches ----------------
+ ENV DEBIAN_FRONTEND=noninteractive TZ=UTC LANG=C.UTF-8 LC_ALL=C.UTF-8 \
+     PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1 \
+     PIP_NO_CACHE_DIR=1 PIP_DISABLE_PIP_VERSION_CHECK=1
+
+ # GPU/Compute
+ ENV NVIDIA_VISIBLE_DEVICES=all
+ ENV CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+ ENV TORCH_CUDA_ARCH_LIST="8.9"
+ ENV CUDA_DEVICE_ORDER=PCI_BUS_ID
+ ENV CUDA_DEVICE_MAX_CONNECTIONS=32
+
+ # Threads
+ ENV OMP_NUM_THREADS=8 MKL_NUM_THREADS=8 MAX_JOBS=160
+
+ # Allocator/caches
+ ENV PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512,garbage_collection_threshold:0.8
+ ENV CUDA_LAUNCH_BLOCKING=0 CUDA_CACHE_MAXSIZE=2147483648 CUDA_CACHE_DISABLE=0
+
+ # App home
+ ENV APP_HOME=/app
+ WORKDIR $APP_HOME
+
+ ENV MODELS_DIR=/app/models
+ RUN mkdir -p /home/user/.cache/models && ln -sf /home/user/.cache/models /app/models
+
+ # ---------------- System & Python ----------------
+ RUN apt-get update && apt-get install -y --no-install-recommends \
+     build-essential tree cmake git git-lfs curl wget ffmpeg ninja-build \
+     python3.10 python3.10-dev python3.10-distutils python3-pip \
+     && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+ RUN ln -sf /usr/bin/python3.10 /usr/bin/python3 && \
+     ln -sf /usr/bin/python3.10 /usr/bin/python && \
+     python3 -m pip install --upgrade pip
+
+ # ---------------- PyTorch cu128 (pinned) ----------------
+ RUN pip install --index-url https://download.pytorch.org/whl/cu128 \
+     torch==2.8.0+cu128 torchvision==0.23.0+cu128 torchaudio==2.8.0+cu128
+
+ # ---------------- Toolchain, Triton, FA2 (without bnb) ----------------
+ RUN pip install packaging ninja cmake pybind11 scikit-build cython hf_transfer numpy==1.24.4
+
+ # Triton 3.x (no triton.ops)
+ RUN pip uninstall -y triton || true && \
+     pip install -v --no-build-isolation triton==3.4.0
+
+ # FlashAttention 2.8.x (try the newest pin first, fall back to older ones)
+ RUN pip install flash-attn==2.8.3 --no-build-isolation || \
+     pip install flash-attn==2.8.2 --no-build-isolation || \
+     pip install flash-attn==2.8.1 --no-build-isolation || \
+     pip install flash-attn==2.8.0.post2 --no-build-isolation
+
+ # Stable Diffusers/Transformers (no dev builds)
+ RUN pip install --no-cache-dir diffusers==0.31.0 transformers==4.44.2 accelerate==0.34.2 omegaconf==2.3.0
+
+ # Optional: custom optimizations fork
+ RUN pip install -U git+https://github.com/carlex22/diffusers-aduc-sdr
+
+ # ---------------- Application dependencies ----------------
+ COPY requirements.txt ./requirements.txt
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ RUN pip install --upgrade bitsandbytes
+
+ # Scripts and configs (WORKDIR is /app, so copy to ./ rather than ./app/)
+ COPY info.sh ./info.sh
+ COPY builder.sh ./builder.sh
+ COPY start.sh ./start.sh
+
+ # ---------------- Code and permissions ----------------
+ COPY . .
+ RUN useradd -m -u 1000 -s /bin/bash appuser && \
+     chown -R appuser:appuser /app && \
+     chmod 0755 /app/start.sh /app/info.sh /app/builder.sh || true
+
+ USER appuser
+
+ # Declare persistent volume for HF Spaces
+ VOLUME /data
+
+ # Env vars pointing caches at /data
+ ENV HF_HOME=/data/.cache/huggingface
+ ENV TORCH_HOME=/data/.cache/torch
+ ENV HF_DATASETS_CACHE=/data/.cache/datasets
+ ENV TRANSFORMERS_CACHE=/data/.cache/transformers
+ ENV DIFFUSERS_CACHE=/data/.cache/diffusers
+ ENV HF_HUB_ENABLE_HF_TRANSFER=1
+ ENV TOKENIZERS_PARALLELISM=false
+
+ VOLUME ["/data/.cache/huggingface/hub", "/data/ckpt/VINCIE-3B"]
+
+ # ---------------- Entry ----------------
+ ENTRYPOINT ["/app/start.sh"]
+ CMD []
README.md CHANGED
@@ -1,10 +1,212 @@
  ---
- title: Tt2
- emoji: 🏆
- colorFrom: blue
- colorTo: pink
  sdk: docker
- pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
  ---
+ title: Euia-AducSdr
+ emoji: 🎥
+ colorFrom: indigo
+ colorTo: purple
  sdk: docker
+ app_file: app.py
+ pinned: true
+ license: agpl-3.0
+ setup_file: setup.sh
+ short_description: Uma implementação aberta e funcional da arquitetura ADUC-SDR
  ---

+
+ ### 🇧🇷 Português
+
+ Uma implementação aberta e funcional da arquitetura ADUC-SDR (Arquitetura de Unificação Compositiva - Escala Dinâmica e Resiliente), projetada para a geração de vídeo coerente de longa duração. Este projeto materializa os princípios de fragmentação, navegação geométrica e um mecanismo de “eco causal” com memória de 4 bits para garantir a continuidade física e narrativa em sequências de vídeo geradas por múltiplos modelos de IA.
+
+ **Licença:** Este projeto é licenciado sob os termos da **GNU Affero General Public License v3.0**. Isto significa que se você usar este software (ou qualquer trabalho derivado) para fornecer um serviço através de uma rede, você é **obrigado a disponibilizar o código-fonte completo** da sua versão para os usuários desse serviço.
+
+ - **Copyright (C) 4 de Agosto de 2025, Carlos Rodrigues dos Santos**
+ - Uma cópia completa da licença pode ser encontrada no arquivo [LICENSE](LICENSE).
+
+ ---
+
+ ### 🇬🇧 English
+
+ An open and functional implementation of the ADUC-SDR (Architecture for Compositive Unification - Dynamic and Resilient Scaling) architecture, designed for long-form coherent video generation. This project materializes the principles of fragmentation, geometric navigation, and a “causal echo” 4-bit memory mechanism to ensure physical and narrative continuity in video sequences generated by multiple AI models.
+
+ **License:** This project is licensed under the terms of the **GNU Affero General Public License v3.0**. This means that if you use this software (or any derivative work) to provide a service over a network, you are **required to make the complete source code** of your version available to the users of that service.
+
+ - **Copyright (C) August 4, 2025, Carlos Rodrigues dos Santos**
+ - A full copy of the license can be found in the [LICENSE](LICENSE) file.
+
+ ---
+
+ ## **Aviso de Propriedade Intelectual e Patenteamento**
+
+ ### **Processo de Patenteamento em Andamento (EM PORTUGUÊS):**
+
+ A arquitetura e o método **ADUC (Automated Discovery and Orchestration of Complex tasks)**, conforme descritos neste projeto e nas reivindicações associadas, estão **atualmente em processo de patenteamento**.
+
+ O titular dos direitos, Carlos Rodrigues dos Santos, está buscando proteção legal para as inovações chave da arquitetura ADUC, que incluem, mas não se limitam a:
+
+ * Fragmentação e escalonamento de solicitações que excedem limites de contexto de modelos de IA.
+ * Distribuição inteligente de sub-tarefas para especialistas heterogêneos.
+ * Gerenciamento de estado persistido com avaliação iterativa e realimentação para o planejamento de próximas etapas.
+ * Planejamento e roteamento sensível a custo, latência e requisitos de qualidade.
+ * O uso de “tokens universais” para comunicação agnóstica a modelos.
+
+ Ao utilizar este software e a arquitetura ADUC aqui implementada, você reconhece a natureza inovadora desta arquitetura e que a **reprodução ou exploração da lógica central da ADUC em sistemas independentes pode infringir direitos de patente pendente.**
+
+ ---
+
+ ### **Patent Pending (IN ENGLISH):**
+
+ The **ADUC (Automated Discovery and Orchestration of Complex tasks)** architecture and method, as described in this project and its associated claims, are **currently in the process of being patented.**
+
+ The rights holder, Carlos Rodrigues dos Santos, is seeking legal protection for the key innovations of the ADUC architecture, including, but not limited to:
+
+ * Fragmentation and scaling of requests exceeding AI model context limits.
+ * Intelligent distribution of sub-tasks to heterogeneous specialists.
+ * Persistent state management with iterative evaluation and feedback for planning subsequent steps.
+ * Cost, latency, and quality-aware planning and routing.
+ * The use of “universal tokens” for model-agnostic communication.
+
+ By using this software and the ADUC architecture implemented herein, you acknowledge the innovative nature of this architecture and that **the reproduction or exploitation of ADUC's core logic in independent systems may infringe upon pending patent rights.**
+
+ ---
+
+ ### Detalhes Técnicos e Reivindicações da ADUC
+
+ #### 🇧🇷 Definição Curta (para Tese e Patente)
+
+ **ADUC** é um *framework pré-input* e *intermediário* de **gerenciamento de prompts** que:
+
+ 1. **fragmenta** solicitações acima do limite de contexto de qualquer modelo,
+ 2. **escala linearmente** (processo sequencial com memória persistida),
+ 3. **distribui** sub-tarefas a **especialistas** (modelos/ferramentas heterogêneos), e
+ 4. **realimenta** a próxima etapa com avaliação do que foi feito/esperado (LLM diretor).
+
+ Não é um modelo; é uma **camada orquestradora** plugável antes do input de modelos existentes (texto, imagem, áudio, vídeo), usando *tokens universais* e a tecnologia atual.
+
+ #### 🇬🇧 Short Definition (for Thesis and Patent)
+
+ **ADUC** is a *pre-input* and *intermediate* **prompt management framework** that:
+
+ 1. **fragments** requests exceeding any model's context limit,
+ 2. **scales linearly** (sequential process with persisted memory),
+ 3. **distributes** sub-tasks to **specialists** (heterogeneous models/tools), and
+ 4. **feeds back** to the next step with an evaluation of what was done/expected (director LLM).
+
+ It is not a model; it is a pluggable **orchestration layer** before the input of existing models (text, image, audio, video), using *universal tokens* and current technology.
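+
+ Below, a minimal self-contained sketch of that loop (illustrative only; every name in it, such as `StateBank` and `orchestrate`, is an assumption for exposition, not this project's real API):
+
+ ```python
+ # Hypothetical sketch of the ADUC loop: (1) fragment -> (2) sequential execution
+ # -> (3) route to a specialist -> persist state -> (4) feed back to the next step.
+ from dataclasses import dataclass, field
+
+ @dataclass
+ class StateBank:                         # persisted memory ("echo", latents, artifacts)
+     entries: list = field(default_factory=list)
+     def persist(self, out): self.entries.append(out)
+     def snapshot(self, n=2): return self.entries[-n:]   # recent state for the next block
+
+ def fragment(request: str, limit: int) -> list:
+     words = request.split()              # stand-in for a real tokenizer
+     return [" ".join(words[i:i + limit]) for i in range(0, len(words), limit)]
+
+ def orchestrate(request: str, limit: int, specialists: dict) -> list:
+     state, results = StateBank(), []
+     for frag in fragment(request, limit):            # (1) fragment, (2) sequential
+         prompt = f"context={state.snapshot()} task={frag}"
+         out = specialists["text"](prompt)            # (3) a real router would pick per fragment
+         state.persist(out)                           # memory consumed by the next step
+         results.append(out)                          # (4) a director LLM would evaluate here
+     return results
+
+ print(orchestrate("a long request " * 4, limit=6, specialists={"text": str.upper}))
+ ```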
+
+ ---
+
+ #### 🇧🇷 Elementos Essenciais (Telegráfico)
+
+ * **Agnóstico a modelos:** opera com qualquer LLM/difusor/API.
+ * **Pré-input manager:** recebe pedido do usuário, **divide** em blocos ≤ limite de tokens, **prioriza**, **agenda** e **roteia**.
+ * **Memória persistida:** resultados/latentes/“eco” viram **estado compartilhado** para o próximo bloco (nada é ignorado).
+ * **Especialistas:** *routers* decidem quem faz o quê (ex.: “descrição → LLM-A”, “keyframe → Img-B”, “vídeo → Vid-C”).
+ * **Controle de qualidade:** LLM diretor compara *o que fez* × *o que deveria* × *o que falta* e **regenera objetivos** do próximo fragmento.
+ * **Custo/latência-aware:** planeja pela **VRAM/tempo/custo**, não tenta “abraçar tudo de uma vez”.
+
+ #### 🇬🇧 Essential Elements (Telegraphic)
+
+ * **Model-agnostic:** operates with any LLM/diffuser/API.
+ * **Pre-input manager:** receives the user request, **divides** it into blocks ≤ token limit, **prioritizes**, **schedules**, and **routes** (see the sketch after this list).
+ * **Persisted memory:** results/latents/“echo” become **shared state** for the next block (nothing is ignored).
+ * **Specialists:** *routers* decide who does what (e.g., “description → LLM-A”, “keyframe → Img-B”, “video → Vid-C”).
+ * **Quality control:** the director LLM compares *what was done* × *what should be done* × *what is missing* and **regenerates objectives** for the next fragment.
+ * **Cost/latency-aware:** plans by **VRAM/time/cost**; it does not try to “embrace everything at once”.
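+
+ A minimal sketch of the “divide into blocks ≤ token limit, discard nothing” behavior (illustrative; the whitespace tokenizer is an assumption):
+
+ ```python
+ # Hypothetical chunker: every token of the request is scheduled into some block,
+ # so over-limit content is rescheduled into new fragments, never silently truncated.
+ def blocks_under_limit(request: str, token_limit: int) -> list:
+     tokens = request.split()                 # stand-in for a real tokenizer
+     out, current = [], []
+     for tok in tokens:
+         if len(current) == token_limit:      # block full: schedule a new one
+             out.append(current)
+             current = []
+         current.append(tok)
+     if current:
+         out.append(current)
+     assert sum(len(b) for b in out) == len(tokens)   # nothing is discarded
+     return out
+
+ print(blocks_under_limit("one two three four five", token_limit=2))
+ ```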
+
+ ---
+
+ #### 🇧🇷 Reivindicações Independentes (Método e Sistema)
+
+ **Reivindicação Independente (Método) — Versão Enxuta:**
+
+ 1. **Método** de **orquestração de prompts** para execução de tarefas acima do limite de contexto de modelos de IA, compreendendo:
+ (a) **receber** uma solicitação que excede um limite de tokens;
+ (b) **analisar** a solicitação por um **LLM diretor** e **fragmentá-la** em sub-tarefas ≤ limite;
+ (c) **selecionar** especialistas de execução para cada sub-tarefa com base em capacidades declaradas;
+ (d) **gerar** prompts específicos por sub-tarefa em **tokens universais**, incluindo referências ao **estado persistido** de execuções anteriores;
+ (e) **executar sequencialmente** as sub-tarefas e **persistir** suas saídas como memória (incluindo latentes/eco/artefatos);
+ (f) **avaliar** automaticamente a saída versus metas declaradas e **regenerar objetivos** do próximo fragmento;
+ (g) **iterar** (b)–(f) até que os critérios de completude sejam atendidos, produzindo o resultado agregado;
+ em que o framework **escala linearmente** no tempo e armazenamento físico, **independente** da janela de contexto dos modelos subjacentes.
+
+ **Reivindicação Independente (Sistema):**
+
+ 2. **Sistema** de orquestração de prompts, compreendendo: um **planejador LLM diretor**; um **roteador de especialistas**; um **banco de estado persistido** (incl. memória cinética para vídeo); um **gerador de prompts universais**; e um **módulo de avaliação/realimentação**, acoplados por uma **API pré-input** a modelos heterogêneos.
+
+ #### 🇬🇧 Independent Claims (Method and System)
+
+ **Independent Claim (Method) — Concise Version:**
+
+ 1. A **method** for **prompt orchestration** for executing tasks exceeding AI model context limits, comprising:
+ (a) **receiving** a request that exceeds a token limit;
+ (b) **analyzing** the request by a **director LLM** and **fragmenting it** into sub-tasks ≤ the limit;
+ (c) **selecting** execution specialists for each sub-task based on declared capabilities;
+ (d) **generating** specific prompts per sub-task in **universal tokens**, including references to the **persisted state** of previous executions;
+ (e) **sequentially executing** the sub-tasks and **persisting** their outputs as memory (including latents/echo/artifacts);
+ (f) **automatically evaluating** the output against declared goals and **regenerating objectives** for the next fragment;
+ (g) **iterating** (b)–(f) until completion criteria are met, producing the aggregated result;
+ wherein the framework **scales linearly** in time and physical storage, **independent** of the context window of the underlying models.
+
+ **Independent Claim (System):**
+
+ 2. A prompt orchestration **system**, comprising: a **director LLM planner**; a **specialist router**; a **persisted state bank** (incl. kinetic memory for video); a **universal prompt generator**; and an **evaluation/feedback module**, coupled via a **pre-input API** to heterogeneous models.
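+
+ Read as code, claim 2's five components map onto interfaces like the following (an illustrative sketch under assumed names, not the filed claim language):
+
+ ```python
+ # Hypothetical component interfaces for the claimed system.
+ from typing import Protocol, Any
+
+ class DirectorPlanner(Protocol):          # director LLM planner
+     def plan(self, request: str, state: dict) -> list: ...
+
+ class SpecialistRouter(Protocol):         # specialist router
+     def route(self, subtask: str) -> str: ...
+
+ class StateBank(Protocol):                # persisted state bank (incl. kinetic memory)
+     def persist(self, key: str, value: Any) -> None: ...
+     def snapshot(self) -> dict: ...
+
+ class PromptGenerator(Protocol):          # universal prompt generator
+     def compose(self, subtask: str, state: dict) -> str: ...
+
+ class Evaluator(Protocol):                # evaluation/feedback module
+     def score(self, goal: str, output: Any) -> float: ...
+ ```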
+
+ ---
+
+ #### 🇧🇷 Dependentes Úteis
+
+ * (3) Onde o roteamento considera **custo/latência/VRAM** e metas de qualidade.
+ * (4) Onde o banco de estado inclui **eco cinético** para vídeo (últimos *n* frames/latentes/fluxo).
+ * (5) Onde a avaliação usa métricas específicas por domínio (Lflow, consistência semântica, etc.).
+ * (6) Onde *tokens universais* padronizam instruções entre especialistas.
+ * (7) Onde a orquestração decide **cut vs continuous** e **corte regenerativo** (Déjà-Vu) ao editar vídeo.
+ * (8) Onde o sistema **nunca descarta** conteúdo excedente: **reagenda** em novos fragmentos.
+
+ #### 🇬🇧 Useful Dependents
+
+ * (3) Wherein routing considers **cost/latency/VRAM** and quality goals.
+ * (4) Wherein the state bank includes **kinetic echo** for video (last *n* frames/latents/flow).
+ * (5) Wherein evaluation uses domain-specific metrics (Lflow, semantic consistency, etc.).
+ * (6) Wherein *universal tokens* standardize instructions between specialists.
+ * (7) Wherein orchestration decides **cut vs continuous** and **regenerative cut** (Déjà-Vu) when editing video.
+ * (8) Wherein the system **never discards** excess content: it **reschedules** it in new fragments.
+
+ ---
+
+ #### 🇧🇷 Como isso conversa com SDR (Vídeo)
+
+ * **Eco Cinético**: é um **tipo de estado persistido** consumido pelo próximo passo.
+ * **Déjà-Vu (Corte Regenerativo)**: é **uma política de orquestração** aplicada quando há edição; ADUC decide, monta os prompts certos e chama o especialista de vídeo.
+ * **Cut vs Continuous**: decisão do **diretor** com base em estado + metas; ADUC roteia e garante a sobreposição/remoção final.
+
+ #### 🇬🇧 How this Converses with SDR (Video)
+
+ * **Kinetic Echo**: is a **type of persisted state** consumed by the next step (sketched after this list).
+ * **Déjà-Vu (Regenerative Cut)**: is an **orchestration policy** applied during editing; ADUC decides, crafts the right prompts, and calls the video specialist.
+ * **Cut vs Continuous**: decision made by the **director** based on state + goals; ADUC routes and ensures the final overlap/removal.
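+
+ A possible shape for that per-step state (illustrative; the fixed-size “last *n* frames” buffer is an assumption):
+
+ ```python
+ # Hypothetical kinetic-echo buffer: keeps only the last n frames/latents,
+ # which the next video fragment consumes as its persisted state.
+ from collections import deque
+
+ class KineticEcho:
+     def __init__(self, n: int = 8):
+         self.frames = deque(maxlen=n)     # older frames fall out automatically
+     def push(self, frame) -> None:
+         self.frames.append(frame)
+     def state(self) -> list:
+         return list(self.frames)          # what the next step conditions on
+
+ echo = KineticEcho(n=3)
+ for f in range(5):
+     echo.push(f)
+ print(echo.state())                       # [2, 3, 4]: only the last n frames survive
+ ```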
+
+ ---
+
+ #### 🇧🇷 Mensagem Clara ao Usuário (Experiência)
+
+ > “Seu pedido excede o limite X do modelo Y. Em vez de truncar silenciosamente, o **ADUC** dividirá e **entregará 100%** do conteúdo por etapas coordenadas.”
+
+ Isso é diferencial prático e jurídico: **não-obviedade** por transformar limite de contexto em **pipeline controlado**, com **persistência de estado** e **avaliação iterativa**.
+
+ #### 🇬🇧 Clear User Message (Experience)
+
+ > "Your request exceeds model Y's limit X. Instead of silently truncating, **ADUC** will divide and **deliver 100%** of the content through coordinated steps."
+
+ This is a practical and legal differentiator: **non-obviousness** by transforming context limits into a **controlled pipeline**, with **state persistence** and **iterative evaluation**.
+
+ ---
+
+ ### Contact / Contato / Contacto
+
+ - **Author / Autor:** Carlos Rodrigues dos Santos
+ - **Email:** carlex22@gmail.com
+ - **GitHub:** [https://github.com/carlex22/Aduc-sdr](https://github.com/carlex22/Aduc-sdr)
+ - **Hugging Face Spaces:**
+   - [Ltx-SuperTime-60Secondos](https://huggingface.co/spaces/Carlexx/Ltx-SuperTime-60Secondos/)
+   - [Novinho](https://huggingface.co/spaces/Carlexxx/Novinho/)
+
+ ---
app.py ADDED
@@ -0,0 +1,212 @@
+ from fastapi import FastAPI, UploadFile, File, Form, BackgroundTasks, HTTPException
+ from fastapi.responses import HTMLResponse, FileResponse, JSONResponse
+ from pathlib import Path
+ import os, uuid, shutil, subprocess, time
+
+ # -------------------- Directory configuration --------------------
+ def default_work_root() -> Path:
+     return Path("/data/work") if Path("/data").exists() else Path("/app/work")
+
+ WORK_ROOT = Path(os.environ.get("WORK_ROOT", default_work_root()))
+ JOBS_DIR = WORK_ROOT / "jobs"
+ JOBS_DIR.mkdir(parents=True, exist_ok=True)
+
+ # Paths/CLI
+ SEEDVR_SCRIPT = os.environ.get("SEEDVR_SCRIPT", "projects/inference_seedvr2_3b.py")
+ DEFAULT_PORT = os.environ.get("APP_PORT", "8000")
+ API_BASE = os.environ.get("API_BASE", f"http://127.0.0.1:{DEFAULT_PORT}")
+
+ # -------------------- FastAPI app --------------------
+ app = FastAPI(title="SeedVR2-3B API + UI")
+
+ # Simple in-memory state
+ jobs = {}  # {job_id: {"status": str, "log": Path, "out": Path, "cmd": list}}
+
+ # -------------------- Simple HTML page (root) --------------------
+ INDEX_HTML = """
+ <!doctype html>
+ <html>
+ <head><meta charset="utf-8"><title>SeedVR UI</title></head>
+ <body>
+ <h2>SeedVR2-3B</h2>
+ <p>Use the UI at <a href="/ui">/ui</a> or the API docs at <a href="/docs">/docs</a>.</p>
+ </body>
+ </html>
+ """
+
+ @app.get("/", response_class=HTMLResponse)
+ async def index():
+     return INDEX_HTML
+
+ # -------------------- Utilities --------------------
+ def visible_gpu_count() -> int:
+     try:
+         import torch
+         if torch.cuda.is_available():
+             return torch.cuda.device_count()
+     except Exception:
+         pass
+     # fallback via env
+     cuda = os.environ.get("CUDA_VISIBLE_DEVICES", "")
+     if not cuda:
+         return 0
+     return len([x for x in cuda.split(",") if x.strip() != ""])
+
+ def effective_nproc(nproc: str) -> str:
+     ng = max(visible_gpu_count(), 1)
+     if nproc == "gpu":
+         return str(ng)
+     try:
+         n = int(nproc)
+         return str(max(1, min(n, ng)))
+     except Exception:
+         return "1"
+
+ def build_torchrun_cmd(nproc: str, input_dir: Path, output_dir: Path,
+                        seed: int, res_h: int, res_w: int, sp_size: int):
+     nproc_arg = effective_nproc(nproc)
+     cmd = [
+         "torchrun",
+         "--standalone",
+         "--rdzv-backend=c10d",
+         "--rdzv-endpoint=localhost:0",
+         "--nnodes=1",
+         f"--nproc-per-node={nproc_arg}",
+         SEEDVR_SCRIPT,
+         "--video_path", str(input_dir),
+         "--output_dir", str(output_dir),
+         "--seed", str(seed),
+         "--res_h", str(res_h),
+         "--res_w", str(res_w),
+         "--sp_size", str(sp_size),
+     ]
+     return cmd
+
+ def run_seedvr(job_id: str, input_dir: Path, output_dir: Path,
+                seed: int, res_h: int, res_w: int, sp_size: int, nproc: str):
+     log_path = output_dir / "run.log"
+     cmd = build_torchrun_cmd(nproc, input_dir, output_dir, seed, res_h, res_w, sp_size)
+     jobs[job_id]["cmd"] = cmd
+     output_dir.mkdir(parents=True, exist_ok=True)
+     with open(log_path, "a", buffering=1) as lf:
+         lf.write("CMD: " + " ".join(cmd) + "\n")
+         proc = subprocess.Popen(
+             cmd,
+             stdout=lf,
+             stderr=subprocess.STDOUT,
+             cwd=str(Path.cwd()),
+             env=os.environ.copy(),
+         )
+         jobs[job_id]["status"] = "running"
+         rc = proc.wait()
+     jobs[job_id]["status"] = "done" if rc == 0 else f"error:{rc}"
+
+ # -------------------- Job endpoints --------------------
+ @app.post("/submit")
+ async def submit(background_tasks: BackgroundTasks,
+                  file: UploadFile = File(...),
+                  seed: int = Form(42),
+                  res_h: int = Form(576),
+                  res_w: int = Form(1024),
+                  sp_size: int = Form(1),
+                  nproc: str = Form("1")):
+     if not file.filename:
+         raise HTTPException(status_code=400, detail="Missing file")
+     job_id = uuid.uuid4().hex
+     job_root = JOBS_DIR / job_id
+     in_dir = job_root / "inputs"
+     out_dir = job_root / "outputs"
+     in_dir.mkdir(parents=True, exist_ok=True)
+     out_dir.mkdir(parents=True, exist_ok=True)
+     dst = in_dir / file.filename
+     with open(dst, "wb") as f:
+         shutil.copyfileobj(file.file, f)
+     jobs[job_id] = {"status": "queued", "log": out_dir / "run.log", "out": out_dir, "cmd": []}
+     background_tasks.add_task(run_seedvr, job_id, in_dir, out_dir, seed, res_h, res_w, sp_size, nproc)
+     return {"job_id": job_id, "status": "queued", "inputs": str(in_dir), "outputs": str(out_dir)}
+
+ @app.get("/jobs/{job_id}/status")
+ async def job_status(job_id: str):
+     if job_id not in jobs:
+         raise HTTPException(status_code=404, detail="Job not found")
+     j = jobs[job_id]
+     resp = {"job_id": job_id, "status": j["status"], "cmd": j.get("cmd", [])}
+     log_path = j["log"]
+     if log_path.exists():
+         try:
+             with open(log_path, "r") as f:
+                 lines = f.readlines()[-50:]
+             resp["log_tail"] = "".join(lines)
+         except Exception:
+             resp["log_tail"] = ""
+     return JSONResponse(resp)
+
+ @app.get("/jobs/{job_id}/result")
+ async def job_result(job_id: str):
+     if job_id not in jobs:
+         raise HTTPException(status_code=404, detail="Job not found")
+     out_dir = jobs[job_id]["out"]
+     if not out_dir.exists():
+         raise HTTPException(status_code=404, detail="Output missing")
+     files = [str(p.name) for p in sorted(out_dir.iterdir()) if p.is_file()]
+     return {"job_id": job_id, "files": files}
+
+ @app.get("/jobs/{job_id}/download/{filename}")
+ async def download_file(job_id: str, filename: str):
+     if job_id not in jobs:
+         raise HTTPException(status_code=404, detail="Job not found")
+     # Path(...).name guards against path traversal in the requested filename
+     file_path = jobs[job_id]["out"] / Path(filename).name
+     if not file_path.exists():
+         raise HTTPException(status_code=404, detail="File not found")
+     return FileResponse(str(file_path), filename=filename)
+
+ # -------------------- Gradio UI (mounted at /ui) --------------------
+ import gradio as gr
+ import requests
+
+ def submit_and_follow(file, seed, res_h, res_w, sp_size, nproc, progress=gr.Progress()):
+     # 1) Submit (context manager closes the upload handle after the request)
+     with open(file.name, "rb") as fh:
+         files = {"file": (os.path.basename(file.name), fh, "application/octet-stream")}
+         data = {"seed": int(seed), "res_h": int(res_h), "res_w": int(res_w), "sp_size": int(sp_size), "nproc": str(nproc)}
+         r = requests.post(f"{API_BASE}/submit", files=files, data=data, timeout=600)
+     r.raise_for_status()
+     job_id = r.json()["job_id"]
+     progress(0, desc=f"Job {job_id} created")
+     log_text = f"Job {job_id} created; following...\n"
+
+     # 2) Polling (final values are yielded: a generator's return value never reaches Gradio)
+     pct = 0.05
+     while True:
+         s = requests.get(f"{API_BASE}/jobs/{job_id}/status", timeout=120).json()
+         if "log_tail" in s and s["log_tail"]:
+             log_text = f"Status: {s['status']}\n\n{s['log_tail']}"
+         else:
+             log_text = f"Status: {s['status']}"
+         progress(min(pct, 0.95), desc=s["status"])
+         yield log_text, job_id, []
+         if s["status"].startswith("error"):
+             yield f"Error: {s['status']}", job_id, []
+             return
+         if s["status"] == "done":
+             files_resp = requests.get(f"{API_BASE}/jobs/{job_id}/result", timeout=120).json()
+             yield "Finished", job_id, files_resp.get("files", [])
+             return
+         time.sleep(2)
+         pct = min(pct + 0.05, 0.95)
+
+ with gr.Blocks(title="SeedVR2-3B UI") as ui:
+     gr.Markdown("### SeedVR2-3B — video/image processing")
+     with gr.Row():
+         in_file = gr.File(label="Video/Image", file_types=["image", "video"])
+         with gr.Column():
+             seed = gr.Number(value=42, label="Seed", precision=0)
+             res_h = gr.Number(value=576, label="Height", precision=0)
+             res_w = gr.Number(value=1024, label="Width", precision=0)
+             sp_size = gr.Number(value=1, label="sp_size", precision=0)
+             nproc = gr.Dropdown(choices=["1","2","4","gpu"], value="1", label="nproc-per-node")
+     go = gr.Button("Start")
+     out_log = gr.Markdown(label="Log/Status")
+     out_job = gr.Textbox(label="job_id")
+     out_files = gr.JSON(label="Generated files")
+     go.click(submit_and_follow, [in_file, seed, res_h, res_w, sp_size, nproc], [out_log, out_job, out_files], queue=True)
+
+ # Mount Gradio at /ui
+ app = gr.mount_gradio_app(app, ui, path="/ui")
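+
+ # Optional local entrypoint (a sketch, not required by the Space launcher): running
+ # `python app.py` serves the FastAPI API plus the Gradio UI mounted at /ui.
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(app,
+                 host=os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0"),
+                 port=int(os.environ.get("GRADIO_SERVER_PORT", DEFAULT_PORT)))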
builder.sh ADDED
@@ -0,0 +1,351 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ echo "🚀 Builder (FlashAttn LayerNorm extra + Apex + Q8) — runtime with a visible GPU"
+
+ # ===== Config and directories =====
+ export SELF_HF_REPO_ID="${SELF_HF_REPO_ID:-euIaxs22/Aduc-sdr}"   # HF repo for wheels
+ export HF_HOME="${HF_HOME:-/app/model_cache}"
+ export HF_HUB_CACHE="${HF_HUB_CACHE:-$HF_HOME/hub}"
+ export TORCH_HOME="${TORCH_HOME:-$HF_HOME/torch}"
+ export HF_HUB_ENABLE_HF_TRANSFER="${HF_HUB_ENABLE_HF_TRANSFER:-1}"
+ export PATH="$HOME/.local/bin:$PATH"
+
+ mkdir -p /app/wheels /app/cuda_cache "$HF_HOME" "$TORCH_HOME" /app/wheels/src
+ chmod -R 777 /app/wheels || true
+ export CUDA_CACHE_PATH="/app/cuda_cache"
+
+ # Preserve the NGC license (if present)
+ if [ -f "/NGC-DL-CONTAINER-LICENSE" ]; then
+   cp -f /NGC-DL-CONTAINER-LICENSE /app/wheels/NGC-DL-CONTAINER-LICENSE || true
+ fi
+
+ # ===== Minimal dependencies =====
+ python -m pip install -v -U pip build setuptools wheel hatchling hatch-vcs scikit-build-core cmake ninja packaging "huggingface_hub[hf_transfer]" || true
+
+ # ===== Environment tags (Python/CUDA/Torch) =====
+ PY_TAG="$(python -c 'import sys; print(f"cp{sys.version_info[0]}{sys.version_info[1]}")' 2>/dev/null || echo cp310)"
+ TORCH_VER="$(python - <<'PY'
+ try:
+     import torch, re
+     v = torch.__version__
+     print(re.sub(r'\+.*$', '', v))
+ except Exception:
+     print("unknown")
+ PY
+ )"
+ CU_TAG="$(python - <<'PY'
+ try:
+     import torch
+     cu = getattr(torch.version, "cuda", None)
+     print("cu" + cu.replace(".", "") if cu else "")
+ except Exception:
+     print("")
+ PY
+ )"
+ echo "[env] PY_TAG=${PY_TAG} TORCH_VER=${TORCH_VER} CU_TAG=${CU_TAG}"
+
+ # ============================================================================
+ # CHECKERS
+ # ============================================================================
+
+ # Specifically checks the native module required by layer_norm (without probing 'flash-attn' as a whole)
+ check_flash_layer_norm_bin () {
+   python - <<'PY'
+ import importlib
+ ok = False
+ # known extensions produced by csrc/layer_norm
+ for name in [
+     "dropout_layer_norm",          # native module name
+     "flash_attn.ops.layer_norm",   # python wrapper that uses the native module
+     "flash_attn.ops.rms_norm",     # may rely on the same backend in some packagings
+ ]:
+     try:
+         importlib.import_module(name)
+         ok = True
+         break
+     except Exception:
+         pass
+ raise SystemExit(0 if ok else 1)
+ PY
+ }
+
+ check_apex () {
+   python - <<'PY'
+ try:
+     from apex.normalization import FusedLayerNorm
+     import importlib; importlib.import_module("fused_layer_norm_cuda")
+     ok = True
+ except Exception:
+     ok = False
+ raise SystemExit(0 if ok else 1)
+ PY
+ }
+
+ check_q8 () {
+   python - <<'PY'
+ import importlib.util
+ spec = importlib.util.find_spec("ltx_q8_kernels") or importlib.util.find_spec("q8_kernels")
+ raise SystemExit(0 if spec else 1)
+ PY
+ }
+
+ # ============================================================================
+ # HUB DOWNLOAD (GENERIC)
+ # ============================================================================
+
+ # Install a wheel from HF by simple prefix (e.g., apex-, q8_kernels-)
+ install_from_hf_by_prefix () {
+   local PREFIX="$1"
+   echo "[hub] Looking for wheels '${PREFIX}-*.whl' in ${SELF_HF_REPO_ID} with tags ${PY_TAG}/${CU_TAG}"
+   python - "$PREFIX" "$PY_TAG" "$CU_TAG" <<'PY' || exit 0
+ import os, sys
+ from huggingface_hub import HfApi, hf_hub_download, HfFolder
+
+ prefix, py_tag, cu_tag = sys.argv[1], sys.argv[2], sys.argv[3]
+ repo = os.environ.get("SELF_HF_REPO_ID", "euIaxs22/Aduc-sdr")
+ api = HfApi(token=os.getenv("HF_TOKEN") or HfFolder.get_token())
+ try:
+     files = api.list_repo_files(repo_id=repo, repo_type="model")
+ except Exception:
+     raise SystemExit(0)
+
+ def match(name: str) -> bool:
+     return name.endswith(".whl") and name.rsplit("/", 1)[-1].startswith(prefix + "-") and (py_tag in name)
+
+ cands = [f for f in files if match(f)]
+ pref = [f for f in cands if cu_tag and cu_tag in f] or cands
+ if not pref:
+     raise SystemExit(0)
+
+ target = sorted(pref, reverse=True)[0]
+ print(target)
+ path = hf_hub_download(repo_id=repo, filename=target, repo_type="model", local_dir="/app/wheels")
+ print(path)
+ PY
+ }
+
+ # Install layer_norm submodule wheels, accepting name variants
+ install_flash_layer_norm_from_hf () {
+   echo "[hub] Looking for FlashAttention LayerNorm wheels in ${SELF_HF_REPO_ID}"
+   python - "$PY_TAG" "$CU_TAG" <<'PY' || exit 0
+ import os, sys, re
+ from huggingface_hub import HfApi, hf_hub_download, HfFolder
+
+ py_tag, cu_tag = sys.argv[1], sys.argv[2]
+ repo = os.environ.get("SELF_HF_REPO_ID", "euIaxs22/Aduc-sdr")
+ api = HfApi(token=os.getenv("HF_TOKEN") or HfFolder.get_token())
+ try:
+     files = api.list_repo_files(repo_id=repo, repo_type="model")
+ except Exception:
+     raise SystemExit(0)
+
+ pats = [
+     r"^flash[_-]?attn[_-]?.*layer[_-]?norm-.*\.whl$",
+     r"^dropout[_-]?layer[_-]?norm-.*\.whl$",
+ ]
+ def ok(fn: str) -> bool:
+     name = fn.rsplit("/", 1)[-1]
+     if py_tag not in name: return False
+     return any(re.search(p, name, flags=re.I) for p in pats)
+
+ cands = [f for f in files if ok(f)]
+ pref = [f for f in cands if cu_tag and cu_tag in f] or cands
+ if not pref:
+     raise SystemExit(0)
+
+ target = sorted(pref, reverse=True)[0]
+ print(target)
+ path = hf_hub_download(repo_id=repo, filename=target, repo_type="model", local_dir="/app/wheels")
+ print(path)
+ PY
+ }
+
+ # ============================================================================
+ # BUILDERS
+ # ============================================================================
+
+ # Extra step: ALWAYS try to install the layer_norm submodule from an HF wheel first;
+ # if no compatible wheel exists, build from csrc/layer_norm and produce a wheel.
+ build_or_install_flash_layer_norm () {
+   echo "[flow] === FlashAttn LayerNorm (extra step) ==="
+
+   # 1) Try an HF wheel first (avoids recompiling)
+   HF_OUT="$(install_flash_layer_norm_from_hf || true)"
+   if [ -n "${HF_OUT:-}" ]; then
+     WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+     echo "[hub] Downloaded: ${WHEEL_PATH}"
+     python -m pip install -v -U --no-build-isolation --no-deps "${WHEEL_PATH}" || true
+     if check_flash_layer_norm_bin; then
+       echo "[flow] FlashAttn LayerNorm: OK via Hub wheel"
+       return 0
+     fi
+     echo "[flow] Hub wheel did not fix the import; falling back to a build"
+   else
+     echo "[hub] No compatible FlashAttn LayerNorm wheel found"
+   fi
+
+   # 2) Build the csrc/layer_norm submodule from source -> wheel
+   local SRC="/app/wheels/src/flash-attn"
+   echo "[build] Preparing FlashAttention (layer_norm) source in ${SRC}"
+   if [ -d "$SRC/.git" ]; then
+     git -C "$SRC" fetch --all -p || true
+     git -C "$SRC" reset --hard origin/main || true
+     git -C "$SRC" clean -fdx || true
+   else
+     rm -rf "$SRC"
+     git clone --depth 1 https://github.com/Dao-AILab/flash-attention "$SRC"
+   fi
+
+   # Pick the target CC from the active GPU (cuts build time/noise)
+   export TORCH_CUDA_ARCH_LIST="$(python - <<'PY'
+ import torch
+ try:
+     cc = "%d.%d" % torch.cuda.get_device_capability(0)
+     print(cc)
+ except Exception:
+     print("8.9")  # fallback for Ada (L40S) when building without a visible GPU
+ PY
+ )"
+   echo "[build] TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST}"
+
+   pushd "$SRC/csrc/layer_norm" >/dev/null
+   export MAX_JOBS="${MAX_JOBS:-90}"
+   # Produce a reusable wheel
+   python -m pip wheel -v --no-build-isolation --no-deps . -w /app/wheels || true
+   popd >/dev/null
+
+   # Install the generated wheel
+   local W="$(ls -t /app/wheels/*flash*attn*layer*norm*-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -z "${W}" ]; then
+     W="$(ls -t /app/wheels/*dropout*layer*norm*-*.whl 2>/dev/null | head -n1 || true)"
+   fi
+   if [ -z "${W}" ]; then
+     # fallback to whatever .whl was generated last
+     W="$(ls -t /app/wheels/*.whl 2>/dev/null | head -n1 || true)"
+   fi
+
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] FlashAttn LayerNorm installed from wheel: ${W}"
+   else
+     echo "[build] No wheel produced; installing straight from source (last resort)"
+     python -m pip install -v --no-build-isolation "$SRC/csrc/layer_norm" || true
+   fi
+
+   # Final check of the native binary
+   if check_flash_layer_norm_bin; then
+     echo "[flow] FlashAttn LayerNorm: import OK after build"
+     return 0
+   fi
+   echo "[flow] FlashAttn LayerNorm: import still failing after build"
+   return 1
+ }
+
+ build_apex () {
+   local SRC="/app/wheels/src/apex"
+   echo "[build] Preparing Apex source in ${SRC}"
+   if [ -d "$SRC/.git" ]; then
+     git -C "$SRC" fetch --all -p || true
+     git -C "$SRC" reset --hard HEAD || true
+     git -C "$SRC" clean -fdx || true
+   else
+     rm -rf "$SRC"
+     git clone --depth 1 https://github.com/NVIDIA/apex "$SRC"
+   fi
+   echo "[build] Building Apex -> wheel"
+   export APEX_CPP_EXT=1 APEX_CUDA_EXT=1 APEX_ALL_CONTRIB_EXT=0
+   python -m pip wheel -v --no-build-isolation --no-deps "$SRC" -w /app/wheels || true
+   local W="$(ls -t /app/wheels/apex-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] Apex installed from freshly built wheel: ${W}"
+   else
+     echo "[build] No Apex wheel produced; installing from source"
+     python -m pip install -v --no-build-isolation "$SRC" || true
+   fi
+ }
+
+ Q8_REPO="${Q8_REPO:-https://github.com/Lightricks/LTX-Video-Q8-Kernels.git}"
+ Q8_COMMIT="${Q8_COMMIT:-f3066edea210082799ca5a2bbf9ef0321c5dd8fc}"
+ build_q8 () {
+   local SRC="/app/wheels/src/q8_kernels"
+   rm -rf "$SRC"
+   git clone --filter=blob:none "$Q8_REPO" "$SRC"
+   git -C "$SRC" checkout "$Q8_COMMIT"
+   git -C "$SRC" submodule update --init --recursive
+   echo "[build] Building Q8 Kernels -> wheel"
+   python -m pip wheel -v --no-build-isolation "$SRC" -w /app/wheels || true
+   local W="$(ls -t /app/wheels/q8_kernels-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] Q8 installed from freshly built wheel: ${W}"
+   else
+     echo "[build] No q8_kernels wheel produced; installing from source"
+     python -m pip install -v --no-build-isolation "$SRC" || true
+   fi
+ }
+
+ # ============================================================================
+ # EXECUTION
+ # ============================================================================
+
+ # Additional step that does NOT depend on an installed "flash-attn": handles only layer_norm
+ build_or_install_flash_layer_norm || true
+
+ # Apex (kept)
+ # Try an HF wheel first; if there is none, build and install as a wheel
+ echo "[flow] === apex ==="
+ HF_OUT="$(install_from_hf_by_prefix "apex" || true)"
+ if [ -n "${HF_OUT:-}" ]; then
+   WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+   echo "[hub] Downloaded: ${WHEEL_PATH}"
+   python -m pip install -v -U --no-build-isolation "${WHEEL_PATH}" || true
+   if ! check_apex; then
+     echo "[flow] apex: import failed after wheel; building"
+     build_apex || true
+   fi
+ else
+   echo "[hub] No compatible apex wheel; building"
+   build_apex || true
+ fi
+
+ # Q8 (optional)
+ # echo "[flow] === q8_kernels ==="
+ # HF_OUT="$(install_from_hf_by_prefix "q8_kernels" || true)"
+ # if [ -n "${HF_OUT:-}" ]; then
+ #   WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+ #   echo "[hub] Downloaded: ${WHEEL_PATH}"
+ #   python -m pip install -v -U --no-build-isolation "${WHEEL_PATH}" || true
+ #   if ! check_q8; then
+ #     echo "[flow] q8_kernels: import failed after wheel; building"
+ #     build_q8 || true
+ #   fi
+ # else
+ #   echo "[hub] No compatible q8_kernels wheel; building"
+ #   build_q8 || true
+ # fi
+
+ # Upload built wheels to HF (cache across restarts); tolerate a missing token under set -e
+ python - <<'PY' || true
+ import os
+ from huggingface_hub import HfApi, HfFolder
+
+ repo = os.environ.get("SELF_HF_REPO_ID", "euIaxs22/Aduc-sdr")
+ token = os.getenv("HF_TOKEN") or HfFolder.get_token()
+ if not token:
+     raise SystemExit("HF_TOKEN missing; upload disabled")
+
+ api = HfApi(token=token)
+ api.upload_folder(
+     folder_path="/app/wheels",
+     repo_id=repo,
+     repo_type="model",
+     allow_patterns=["*.whl", "NGC-DL-CONTAINER-LICENSE"],
+     ignore_patterns=["**/src/**", "**/*.log", "**/logs/**", ".git/**"],
+ )
+ print("Upload finished (wheels + license).")
+ PY
+
+ chmod -R 777 /app/wheels || true
+ echo "✅ Builder finished."
info.sh ADDED
@@ -0,0 +1,154 @@
+ #!/usr/bin/env bash
+
+ set -euo pipefail
+
+ echo "================= RUNTIME CAPABILITIES ================="
+ date
+
+ echo
+ if command -v nvidia-smi >/dev/null 2>&1; then
+   nvidia-smi
+ else
+   echo "nvidia-smi: not available"
+ fi
+ echo
+
+ echo "CUDA_HOME: ${CUDA_HOME:-/usr/local/cuda}"
+ if command -v nvcc >/dev/null 2>&1; then
+   nvcc --version || true
+ else
+   echo "nvcc: not available"
+ fi
+ echo
+
+ echo "[PyTorch / CUDA backend]"
+ python3 - <<'PY'
+ import json, os, torch, inspect
+
+ def to_bool(x):
+     try:
+         if callable(x):
+             try:
+                 sig = inspect.signature(x)
+                 if len(sig.parameters) == 0:
+                     return bool(x())
+             except Exception:
+                 pass
+             return True
+         return bool(x)
+     except Exception:
+         return None
+
+ info = {
+     "torch": getattr(torch, "__version__", None),
+     "cuda_available": torch.cuda.is_available(),
+     "cuda_device_count": torch.cuda.device_count(),
+     "cuda_runtime_version": getattr(torch.version, "cuda", None),
+     "cudnn_version": torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else None,
+     "tf32": (torch.backends.cuda.matmul.allow_tf32 if torch.cuda.is_available() else None),
+     "flash_sdp": (to_bool(getattr(torch.backends.cuda, "enable_flash_sdp", None)) if torch.cuda.is_available() else None),
+     "mem_efficient_sdp": (to_bool(getattr(torch.backends.cuda, "enable_mem_efficient_sdp", None)) if torch.cuda.is_available() else None),
+     "math_sdp": (to_bool(getattr(torch.backends.cuda, "enable_math_sdp", None)) if torch.cuda.is_available() else None),
+ }
+ print(json.dumps(info, indent=2))
+ for i in range(min(torch.cuda.device_count(), 16)):
+     print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
+ PY
+ echo
+
+ echo "[Apex (FusedLayerNorm/RMSNorm)]"
+ python3 - <<'PY'
+ try:
+     from apex.normalization import FusedLayerNorm, FusedRMSNorm
+     import importlib; importlib.import_module("fused_layer_norm_cuda")
+     print("apex.normalization: OK")
+ except Exception as e:
+     print("apex.normalization: FAIL ->", e)
+ PY
+ echo
+
+ echo "[FlashAttention (CUDA/Triton/RMSNorm)]"
+ python3 - <<'PY'
+ import importlib
+ mods = [
+     'flash_attn', 'flash_attn_2_cuda',
+     'flash_attn.ops.rms_norm', 'flash_attn.ops.layer_norm',
+     'flash_attn.layers.layer_norm'
+ ]
+ for m in mods:
+     try:
+         importlib.import_module(m)
+         print(f"{m}: OK")
+     except Exception as e:
+         print(f"{m}: FAIL -> {e}")
+ PY
+ echo
+
+ echo "[FlashAttention version/details]"
+ python3 - <<'PY'
+ try:
+     import flash_attn
+     fa_ver = getattr(flash_attn, "__version__", None)
+     print(f"flash_attn: {fa_ver}")
+ except Exception:
+     print("flash_attn: not importable.")
+ try:
+     import torch
+     print(f"torch: {torch.__version__} | cuda: {getattr(torch.version, 'cuda', None)}")
+ except Exception:
+     pass
+ PY
+ echo
+
+ echo "[Triton]"
+ python3 - <<'PY'
+ try:
+     import triton
+     print("triton:", triton.__version__)
+     try:
+         import triton.ops as _; print("triton.ops: OK")
+     except Exception:
+         print("triton.ops: not present (ok on Triton>=3.x)")
+ except Exception as e:
+     print("triton: FAIL ->", e)
+ PY
+ echo
+
+ echo "[BitsAndBytes (Q8/Q4)]"
+ python3 - <<'PY'
+ try:
+     import bitsandbytes as bnb
+     print("bitsandbytes:", bnb.__version__)
+     try:
+         from bitsandbytes.triton import _custom_ops as _; print("bnb.triton._custom_ops: OK")
+     except Exception as e:
+         print("bnb.triton: partial ->", e)
+ except Exception as e:
+     print("bitsandbytes: FAIL ->", e)
+ PY
+ echo
+
+ echo "[Transformers / Diffusers / XFormers / EcoML]"
+ python3 - <<'PY'
+ def _v(m):
+     try:
+         mod = __import__(m)
+         print(f"{m}: {getattr(mod, '__version__', 'unknown')}")
+     except Exception as e:
+         print(f"{m}: FAIL -> {e}")
+ for m in ("transformers", "diffusers", "xformers", "ecuml", "mlx", "ecobase"):
+     _v(m)
+ PY
+ echo
+
+ echo "[Distributed / NCCL Env]"
+ env | grep -E '^(CUDA_VISIBLE_DEVICES|NCCL_|TORCH_|ENABLE_.*SDP|HF_HUB_.*|CUDA_|NV_.*NCCL.*|PYTORCH_CUDA_ALLOC_CONF)=' | sort
+ echo
+
+ echo "[Output dir/perms]"
+ OUT="/app/outputs"
+ echo "OUT dir: $OUT"
+ mkdir -p "$OUT"
+ ls -la "$OUT" || true
+
+ echo "================= END CAPABILITIES ================="
requirements.txt ADDED
@@ -0,0 +1,50 @@
+ gradio[oauth]
+ opencv-python-headless
+ pillow>=10.4.0
+ numpy==1.24.4
+
+ safetensors>=0.4.5
+ einops>=0.8.0
+ pyyaml>=6.0.2
+ omegaconf==2.3.0
+ mediapy
+ librosa
+ moviepy
+ beartype
+ sentencepiece
+ fastapi
+ uvicorn[standard]
+ tiktoken
+ transformers_stream_generator
+ rotary-embedding-torch
+ transformers
+ accelerate
+ decord
+ huggingface_hub
+ google-generativeai
+ tabulate
+ pydantic
+ soundfile
+ requests
+ hf_transfer
+ timm
+ ftfy
+ easydict
+
+ # vince (duplicates of the pins above removed; torch/torchvision are pinned in the Dockerfile)
+ bitsandbytes==0.45.0
+ torch
+ torchvision
start.sh ADDED
@@ -0,0 +1,97 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ # =======================
+ # SeedVR — start UI
+ # =======================
+
+ # 1) Builder (ensures Apex/Flash and CUDA deps)
+ echo "🛠️ Running builder.sh to build/install CUDA dependencies..."
+ if [ -f "/app/builder.sh" ]; then
+   /bin/bash /app/builder.sh
+   echo "✅ Builder finished."
+ else
+   echo "⚠️ Warning: builder.sh not found. Skipping the dependency build step."
+ fi
+
+ # Folders and variables (can be overridden via env)
+ export SEEDVR_ROOT="${SEEDVR_ROOT:-/data/SeedVR}"
+ export CKPTS_ROOT="${CKPTS_ROOT:-/data/ckpts/SeedVR2-3B}"
+ export OUTPUT_ROOT="${OUTPUT_ROOT:-/app/outputs}"
+ export INPUT_ROOT="${INPUT_ROOT:-/app/inputs}"
+
+ # Transformers v5 recommends HF_HOME
+ export HF_HOME="${HF_HOME:-/data/.cache/huggingface}"
+ export HF_TOKEN="${HF_TOKEN:-${HUGGINGFACE_TOKEN:-}}"
+
+ # Repo/model identifiers
+ export SEEDVR_GIT_URL="${SEEDVR_GIT_URL:-https://github.com/ByteDance-Seed/SeedVR.git}"
+ export SEEDVR_REPO_ID="${SEEDVR_REPO_ID:-ByteDance-Seed/SeedVR2-3B}"
+
+ # Multi-GPU / torchrun
+ export NUM_GPUS="${NUM_GPUS:-8}"
+ export NCCL_P2P_LEVEL="${NCCL_P2P_LEVEL:-NVL}"
+ export NCCL_ASYNC_ERROR_HANDLING="${NCCL_ASYNC_ERROR_HANDLING:-1}"
+ export OMP_NUM_THREADS="${OMP_NUM_THREADS:-8}"
+
+ # Gradio
+ export GRADIO_SERVER_NAME="${GRADIO_SERVER_NAME:-0.0.0.0}"
+ export GRADIO_SERVER_PORT="${GRADIO_SERVER_PORT:-7860}"
+
+ mkdir -p "$SEEDVR_ROOT" "$CKPTS_ROOT" "$OUTPUT_ROOT" "$INPUT_ROOT" "$HF_HOME"
+
+ echo "[seedvr][start] checking environment..."
+ command -v python >/dev/null || { echo "[seedvr][start] python not found"; exit 1; }
+ command -v nvidia-smi >/dev/null && nvidia-smi || echo "[seedvr][start] warn: nvidia-smi not available"
+
+ echo "[seedvr][start] cloning repo if missing: $SEEDVR_ROOT"
+ if [ ! -d "$SEEDVR_ROOT/.git" ]; then
+   git clone "$SEEDVR_GIT_URL" "$SEEDVR_ROOT"
+ else
+   echo "[seedvr][start] repo present"
+ fi
+
+ echo "[seedvr][start] downloading model (snapshot_download) into $CKPTS_ROOT"
+ python - <<'PY'
+ import os
+ from pathlib import Path
+ from huggingface_hub import snapshot_download
+
+ repo_id = os.environ["SEEDVR_REPO_ID"]
+ save_dir = os.environ["CKPTS_ROOT"]
+ cache_dir = os.environ["HF_HOME"]
+ token = os.environ.get("HF_TOKEN") or None
+
+ Path(save_dir).mkdir(parents=True, exist_ok=True)
+ snapshot_download(
+     repo_id=repo_id,
+     cache_dir=cache_dir,
+     local_dir=save_dir,
+     local_dir_use_symlinks=False,
+     resume_download=True,
+     allow_patterns=["*.json", "*.safetensors", "*.pth", "*.bin", "*.py", "*.md", "*.txt"],
+     token=token,
+ )
+ print("[seedvr][start] snapshot_download ok:", save_dir)
+ PY
+
+ mkdir -p "$OUTPUT_ROOT" && chmod -R 777 "$OUTPUT_ROOT" || true
+
+ echo "[seedvr][start] ensuring ckpt symlink SeedVR/ckpts/SeedVR2-3B -> $CKPTS_ROOT"
+ mkdir -p "$SEEDVR_ROOT/ckpts"
+ if [ -L "$SEEDVR_ROOT/ckpts/SeedVR2-3B" ]; then
+   target="$(readlink -f "$SEEDVR_ROOT/ckpts/SeedVR2-3B" || true)"
+   if [ "$target" != "$CKPTS_ROOT" ]; then
+     rm -f "$SEEDVR_ROOT/ckpts/SeedVR2-3B"
+   fi
+ fi
+ if [ ! -e "$SEEDVR_ROOT/ckpts/SeedVR2-3B" ]; then
+   ln -s "$CKPTS_ROOT" "$SEEDVR_ROOT/ckpts/SeedVR2-3B"
+ fi
+
+ echo "[seedvr][start] launching app.py at ${GRADIO_SERVER_NAME}:${GRADIO_SERVER_PORT}"
+ exec python app.py