HongFangzhou committed
Commit 699ffc9 • 1 Parent(s): 8ee45cc

should work now

Files changed (2):
  1. app.py +10 -3
  2. requirements.txt +6 -6
app.py CHANGED
@@ -2,9 +2,7 @@ import os
 import sys
 import cv2
 import time
-import tyro
 import json
-import kiui
 import tqdm
 import torch
 import mcubes
@@ -22,6 +20,13 @@ from huggingface_hub import hf_hub_download
 
 sys.path.append("3DTopia")
 
+os.system("pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch")
+os.system("pip install git+https://github.com/NVlabs/nvdiffrast")
+os.system("pip install git+https://github.com/3DTopia/threefiner")
+
+import tyro
+import kiui
+
 from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.models.diffusion.plms import PLMSSampler
 from ldm.models.diffusion.dpm_solver import DPMSolverSampler
@@ -110,6 +115,7 @@ opt.save = GRADIO_SAVE_PATH_MESH
 opt.prompt = ''
 opt.text_dir = True
 opt.front_dir = '+z'
+opt.force_cuda_rast = True
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 gui = GUI(opt)
 ###################################### INIT STAGE 2 #########################################
@@ -135,7 +141,7 @@ def add_text(rgb, caption):
 
 def marching_cube(b, text, global_info):
     # prepare volumn for marching cube
-    res = 128
+    res = 64
     assert 'decode_res' in global_info
     decode_res = global_info['decode_res']
     c_list = torch.linspace(-1.2, 1.2, steps=res)
@@ -358,6 +364,7 @@ def process_stage2(input_model, input_text, input_dir, iters, output_model, outp
 
 markdown=f'''
 # 3DTopia
+![](https://visitor-badge.laobi.icu/badge?page_id=3DTopia.3DTopia.gradio)
 A two-stage text-to-3D generation model. The first stage uses diffusion model to quickly generate candidates. The second stage refines the assets chosen from the first stage.
 
 ### Usage:
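The core fix in this commit is moving the CUDA-dependent packages (tiny-cuda-nn, nvdiffrast, threefiner) out of requirements.txt and into `os.system("pip install ...")` calls at app startup, with `import tyro` and `import kiui` deferred until after those installs; `opt.force_cuda_rast = True` likely selects nvdiffrast's CUDA rasterizer so no OpenGL context is required on headless Space hardware. A minimal sketch of this deferred-install pattern, with a hypothetical helper that is not part of the 3DTopia repo:

```python
# Sketch of the runtime-install pattern used in app.py above (hypothetical
# helper, not repo code): CUDA extensions such as tiny-cuda-nn and nvdiffrast
# often fail to build while requirements.txt is being resolved, so they are
# installed when the app starts, before importing anything that needs them.
import importlib
import subprocess
import sys


def ensure_installed(module_name: str, pip_spec: str) -> None:
    """Install `pip_spec` at runtime if `module_name` cannot be imported yet."""
    try:
        importlib.import_module(module_name)
    except ImportError:
        subprocess.check_call([sys.executable, "-m", "pip", "install", pip_spec])


ensure_installed("nvdiffrast", "git+https://github.com/NVlabs/nvdiffrast")
ensure_installed("threefiner", "git+https://github.com/3DTopia/threefiner")

# Imports that depend on the freshly installed packages must come after the
# install step, which is why `import tyro` and `import kiui` moved below the
# os.system() calls in the diff.
import tyro  # noqa: E402
import kiui  # noqa: E402
```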
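The other resource-related change drops the marching-cube query resolution from 128 to 64. A back-of-the-envelope sketch (not repo code) of what that buys, reusing the same `torch.linspace(-1.2, 1.2, steps=res)` grid that `marching_cube` builds:

```python
# The marching-cube volume is res**3 query points, so halving res from 128 to 64
# cuts the number of decoded samples (and the density volume's memory) by 8x.
import torch

for res in (128, 64):
    c_list = torch.linspace(-1.2, 1.2, steps=res)
    grid = torch.stack(torch.meshgrid(c_list, c_list, c_list, indexing="ij"), dim=-1)
    n_points = grid.reshape(-1, 3).shape[0]  # res ** 3 query coordinates
    print(res, n_points, f"{n_points * 4 / 1e6:.1f} MB per float32 density channel")
```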
requirements.txt CHANGED
@@ -1,6 +1,7 @@
-torch
-torchvision
-torchaudio
+torch==1.13.1+cu117
+torchvision==0.14.1+cu117
+torchaudio==0.13.1
+--extra-index-url https://download.pytorch.org/whl/cu117
 pytorch-lightning
 numpy
 tqdm
@@ -55,6 +56,5 @@ vit-pytorch
 wandb
 wcwidth
 zipp
-git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch
-git+https://github.com/NVlabs/nvdiffrast
-git+https://github.com/3DTopia/threefiner
+kiui
+accelerate
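requirements.txt now pins the PyTorch stack to the cu117 wheels and points pip at the matching extra index. A quick sanity check, offered as a sketch rather than repo code, that the pinned build actually resolved on the Space:

```python
# Verify that the cu117 wheels pinned in requirements.txt are what got installed.
import torch

print(torch.__version__)          # expected: 1.13.1+cu117
print(torch.version.cuda)         # expected: 11.7
print(torch.cuda.is_available())  # True on GPU hardware, False on a CPU Space
```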