Mariam-Elz committed
Commit eb19fd4 · verified
1 Parent(s): ece306c

Update app.py

Files changed (1)
app.py +338 -97
app.py CHANGED
@@ -336,105 +336,346 @@
 
 
  #############7tth################
- import torch
- import torch.nn as nn
- import gradio as gr
- import requests
- import os
- import torchvision.transforms as transforms
- import numpy as np
- from PIL import Image
-
- # Hugging Face Model Repository
- model_repo = "Mariam-Elz/CRM"
-
- # Model File Path
- model_path = "models/CRM.pth"
- os.makedirs("models", exist_ok=True)
-
- # Download model weights if not present
- if not os.path.exists(model_path):
-     url = f"https://huggingface.co/{model_repo}/resolve/main/CRM.pth"
-     print(f"Downloading CRM.pth...")
-     response = requests.get(url)
-     with open(model_path, "wb") as f:
-         f.write(response.content)
-
- # Set Device
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- # Define Model Architecture (Replace with your actual model)
- class CRMModel(nn.Module):
-     def __init__(self):
-         super(CRMModel, self).__init__()
-         self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
-         self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
-         self.relu = nn.ReLU()
-
-     def forward(self, x):
-         x = self.relu(self.conv1(x))
-         x = self.relu(self.conv2(x))
-         return x
-
- # Load Model
- def load_model():
-     print("Loading model...")
-     model = CRMModel()  # Use the correct architecture here
-     state_dict = torch.load(model_path, map_location=device)
-
-     if isinstance(state_dict, dict):  # Ensure it's a valid state_dict
-         model.load_state_dict(state_dict)
      else:
-         raise ValueError("Error: The loaded state_dict is not in the correct format.")
-
-     model.to(device)
-     model.eval()
-     print("Model loaded successfully!")
-     return model
-
- # Load the model
- model = load_model()
-
- # Define Inference Function
- def infer(image):
-     """Process input image and return a reconstructed 3D output."""
-     try:
-         print("Preprocessing image...")
-
-         # Convert image to PyTorch tensor & normalize
-         transform = transforms.Compose([
-             transforms.Resize((256, 256)),  # Resize to fit model input
-             transforms.ToTensor(),  # Converts to tensor (C, H, W)
-             transforms.Normalize(mean=[0.5], std=[0.5]),  # Normalize
-         ])
-         image_tensor = transform(image).unsqueeze(0).to(device)  # Add batch dimension
-
-         print("Running inference...")
-         with torch.no_grad():
-             output = model(image_tensor)  # Forward pass
-
-         # Ensure output is a valid tensor
-         if isinstance(output, torch.Tensor):
-             output_image = output.squeeze(0).permute(1, 2, 0).cpu().numpy()
-             output_image = np.clip(output_image * 255.0, 0, 255).astype(np.uint8)
-             print("Inference complete! Returning output.")
-             return output_image
-         else:
-             print("Error: Model output is not a tensor.")
-             return None
-
-     except Exception as e:
-         print(f"Error during inference: {e}")
-         return None
-
- # Create Gradio UI
- demo = gr.Interface(
-     fn=infer,
-     inputs=gr.Image(type="pil"),
-     outputs=gr.Image(type="numpy"),
-     title="Convolutional Reconstruction Model",
-     description="Upload an image to get the reconstructed output."
  )
 
- if __name__ == "__main__":
-     demo.launch()
 
 
  #############7tth################
+ # import torch
+ # import torch.nn as nn
+ # import gradio as gr
+ # import requests
+ # import os
+ # import torchvision.transforms as transforms
+ # import numpy as np
+ # from PIL import Image
+
+ # # Hugging Face Model Repository
+ # model_repo = "Mariam-Elz/CRM"
+
+ # # Model File Path
+ # model_path = "models/CRM.pth"
+ # os.makedirs("models", exist_ok=True)
+
+ # # Download model weights if not present
+ # if not os.path.exists(model_path):
+ #     url = f"https://huggingface.co/{model_repo}/resolve/main/CRM.pth"
+ #     print(f"Downloading CRM.pth...")
+ #     response = requests.get(url)
+ #     with open(model_path, "wb") as f:
+ #         f.write(response.content)
+
+ # # Set Device
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # # Define Model Architecture (Replace with your actual model)
+ # class CRMModel(nn.Module):
+ #     def __init__(self):
+ #         super(CRMModel, self).__init__()
+ #         self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
+ #         self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
+ #         self.relu = nn.ReLU()
+
+ #     def forward(self, x):
+ #         x = self.relu(self.conv1(x))
+ #         x = self.relu(self.conv2(x))
+ #         return x
+
+ # # Load Model
+ # def load_model():
+ #     print("Loading model...")
+ #     model = CRMModel()  # Use the correct architecture here
+ #     state_dict = torch.load(model_path, map_location=device)
+
+ #     if isinstance(state_dict, dict):  # Ensure it's a valid state_dict
+ #         model.load_state_dict(state_dict)
+ #     else:
+ #         raise ValueError("Error: The loaded state_dict is not in the correct format.")
+
+ #     model.to(device)
+ #     model.eval()
+ #     print("Model loaded successfully!")
+ #     return model
+
+ # # Load the model
+ # model = load_model()
+
+ # # Define Inference Function
+ # def infer(image):
+ #     """Process input image and return a reconstructed 3D output."""
+ #     try:
+ #         print("Preprocessing image...")
+
+ #         # Convert image to PyTorch tensor & normalize
+ #         transform = transforms.Compose([
+ #             transforms.Resize((256, 256)),  # Resize to fit model input
+ #             transforms.ToTensor(),  # Converts to tensor (C, H, W)
+ #             transforms.Normalize(mean=[0.5], std=[0.5]),  # Normalize
+ #         ])
+ #         image_tensor = transform(image).unsqueeze(0).to(device)  # Add batch dimension
+
+ #         print("Running inference...")
+ #         with torch.no_grad():
+ #             output = model(image_tensor)  # Forward pass
+
+ #         # Ensure output is a valid tensor
+ #         if isinstance(output, torch.Tensor):
+ #             output_image = output.squeeze(0).permute(1, 2, 0).cpu().numpy()
+ #             output_image = np.clip(output_image * 255.0, 0, 255).astype(np.uint8)
+ #             print("Inference complete! Returning output.")
+ #             return output_image
+ #         else:
+ #             print("Error: Model output is not a tensor.")
+ #             return None
+
+ #     except Exception as e:
+ #         print(f"Error during inference: {e}")
+ #         return None
+
+ # # Create Gradio UI
+ # demo = gr.Interface(
+ #     fn=infer,
+ #     inputs=gr.Image(type="pil"),
+ #     outputs=gr.Image(type="numpy"),
+ #     title="Convolutional Reconstruction Model",
+ #     description="Upload an image to get the reconstructed output."
+ # )
+
+ # if __name__ == "__main__":
+ #     demo.launch()
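+ # (The commented-out block above is the previous single-model demo, kept for reference.)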
+
+
+
+
+ # Not ready to use yet
+ import spaces
+ import argparse
+ import numpy as np
+ import gradio as gr
+ from omegaconf import OmegaConf
+ import torch
+ from PIL import Image
+ import PIL
+ from pipelines import TwoStagePipeline
+ from huggingface_hub import hf_hub_download
+ import os
+ import rembg
+ from typing import Any
+ import json
+
+ from model import CRM
+ from inference import generate3d
+
+ pipeline = None
+ rembg_session = rembg.new_session()
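+ # A single module-level rembg session is reused for every request, so the
+ # background-removal model is loaded only once.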
+
+
+ def expand_to_square(image, bg_color=(0, 0, 0, 0)):
+     # Expand the image to a 1:1 aspect ratio, centering it on a transparent canvas.
+     width, height = image.size
+     if width == height:
+         return image
+     new_size = (max(width, height), max(width, height))
+     new_image = Image.new("RGBA", new_size, bg_color)
+     paste_position = ((new_size[0] - width) // 2, (new_size[1] - height) // 2)
+     new_image.paste(image, paste_position)
+     return new_image
+
+ def check_input_image(input_image):
+     if input_image is None:
+         raise gr.Error("No image uploaded!")
+
+
+ def remove_background(
+     image: PIL.Image.Image,
+     rembg_session: Any = None,
+     force: bool = False,
+     **rembg_kwargs,
+ ) -> PIL.Image.Image:
+     do_remove = True
+     if image.mode == "RGBA" and image.getextrema()[3][0] < 255:
+         # The alpha channel is not empty, so skip background removal and use it as the mask.
+         print("alpha channel not empty, skip remove background, using alpha channel as mask")
+         background = Image.new("RGBA", image.size, (0, 0, 0, 0))
+         image = Image.alpha_composite(background, image)
+         do_remove = False
+     do_remove = do_remove or force
+     if do_remove:
+         image = rembg.remove(image, session=rembg_session, **rembg_kwargs)
+     return image
+
+ def do_resize_content(original_image: Image.Image, scale_rate):
+     # Rescale the image content while keeping the original canvas size.
+     if scale_rate != 1:
+         # Calculate the new size after rescaling
+         new_size = tuple(int(dim * scale_rate) for dim in original_image.size)
+         # Resize the image while maintaining the aspect ratio
+         resized_image = original_image.resize(new_size)
+         # Create a new image with the original size and a transparent background
+         padded_image = Image.new("RGBA", original_image.size, (0, 0, 0, 0))
+         paste_position = ((original_image.width - resized_image.width) // 2, (original_image.height - resized_image.height) // 2)
+         padded_image.paste(resized_image, paste_position)
+         return padded_image
      else:
+         return original_image
+
+ def add_background(image, bg_color=(255, 255, 255)):
+     # Given an RGBA image, use the alpha channel as a mask to composite over a background color.
+     background = Image.new("RGBA", image.size, bg_color)
+     return Image.alpha_composite(background, image)
+
+
+ def preprocess_image(image, background_choice, foreground_ratio, background_color):
+     """
+     Input image is a PIL image in RGBA; returns an RGB image.
+     """
+     print(background_choice)
+     if background_choice == "Alpha as mask":
+         background = Image.new("RGBA", image.size, (0, 0, 0, 0))
+         image = Image.alpha_composite(background, image)
+     else:
+         image = remove_background(image, rembg_session, force=True)
+     image = do_resize_content(image, foreground_ratio)
+     image = expand_to_square(image)
+     image = add_background(image, background_color)
+     return image.convert("RGB")
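+
+ # On Hugging Face ZeroGPU Spaces, the @spaces.GPU decorator below requests a GPU only for
+ # the duration of the call. gen_image drives the two-stage pipeline (stage 1: multi-view
+ # RGB images, stage 2: the matching CCM coordinate maps), tiles each set side by side,
+ # and then meshes the result with generate3d.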
+ @spaces.GPU
+ def gen_image(input_image, seed, scale, step):
+     global pipeline, model, args
+     pipeline.set_seed(seed)
+     rt_dict = pipeline(input_image, scale=scale, step=step)
+     stage1_images = rt_dict["stage1_images"]
+     stage2_images = rt_dict["stage2_images"]
+     np_imgs = np.concatenate(stage1_images, 1)
+     np_xyzs = np.concatenate(stage2_images, 1)
+
+     glb_path = generate3d(model, np_imgs, np_xyzs, args.device)
+     return Image.fromarray(np_imgs), Image.fromarray(np_xyzs), glb_path  # , obj_path
+
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+     "--stage1_config",
+     type=str,
+     default="configs/nf7_v3_SNR_rd_size_stroke.yaml",
+     help="config for stage1",
+ )
+ parser.add_argument(
+     "--stage2_config",
+     type=str,
+     default="configs/stage2-v2-snr.yaml",
+     help="config for stage2",
+ )
+
+ parser.add_argument("--device", type=str, default="cuda")
+ args = parser.parse_args()
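+ # Note: when the Space launches this script there are no CLI arguments, so the
+ # defaults above are the values that actually run.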
+
+ crm_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="CRM.pth")
+ specs = json.load(open("configs/specs_objaverse_total.json"))
+ model = CRM(specs)
+ model.load_state_dict(torch.load(crm_path, map_location="cpu"), strict=False)
+ model = model.to(args.device)
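+ # strict=False tolerates key mismatches between the checkpoint and this CRM build
+ # instead of raising, so loading succeeds even if some weights are missing or extra.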
+
+ stage1_config = OmegaConf.load(args.stage1_config).config
+ stage2_config = OmegaConf.load(args.stage2_config).config
+ stage2_sampler_config = stage2_config.sampler
+ stage1_sampler_config = stage1_config.sampler
+
+ stage1_model_config = stage1_config.models
+ stage2_model_config = stage2_config.models
+
+ xyz_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="ccm-diffusion.pth")
+ pixel_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="pixel-diffusion.pth")
+ stage1_model_config.resume = pixel_path
+ stage2_model_config.resume = xyz_path
+
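+ # Stage 1 resumes from the pixel-diffusion weights (multi-view RGB generation);
+ # stage 2 from the ccm-diffusion weights (the corresponding coordinate maps).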
+ pipeline = TwoStagePipeline(
+     stage1_model_config,
+     stage2_model_config,
+     stage1_sampler_config,
+     stage2_sampler_config,
+     device=args.device,
+     dtype=torch.float32
  )
 
+ _DESCRIPTION = '''
+ * Our [official implementation](https://github.com/thu-ml/CRM) uses a UV texture instead of vertex color. It has better texture than this online demo.
+ * Project page of CRM: https://ml.cs.tsinghua.edu.cn/~zhengyi/CRM/
+ * If you find the output unsatisfactory, try using different seeds :)
+ '''
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# CRM: Single Image to 3D Textured Mesh with Convolutional Reconstruction Model")
+     gr.Markdown(_DESCRIPTION)
+     with gr.Row():
+         with gr.Column():
+             with gr.Row():
+                 image_input = gr.Image(
+                     label="Image input",
+                     image_mode="RGBA",
+                     sources="upload",
+                     type="pil",
+                 )
+                 processed_image = gr.Image(label="Processed Image", interactive=False, type="pil", image_mode="RGB")
+             with gr.Row():
+                 with gr.Column():
+                     with gr.Row():
+                         background_choice = gr.Radio([
+                             "Alpha as mask",
+                             "Auto Remove background"
+                         ], value="Auto Remove background",
+                            label="background choice")
+                         # do_remove_background = gr.Checkbox(label=, value=True)
+                         # force_remove = gr.Checkbox(label=, value=False)
+                         background_color = gr.ColorPicker(label="Background Color", value="#7F7F7F", interactive=False)
+                         foreground_ratio = gr.Slider(
+                             label="Foreground Ratio",
+                             minimum=0.5,
+                             maximum=1.0,
+                             value=1.0,
+                             step=0.05,
+                         )
+
+                 with gr.Column():
+                     seed = gr.Number(value=1234, label="seed", precision=0)
+                     guidance_scale = gr.Number(value=5.5, minimum=3, maximum=10, label="guidance_scale")
+                     step = gr.Number(value=30, minimum=30, maximum=100, label="sample steps", precision=0)
+             text_button = gr.Button("Generate 3D shape")
+             gr.Examples(
+                 examples=[os.path.join("examples", i) for i in os.listdir("examples")],
+                 inputs=[image_input],
+                 examples_per_page=20,
+             )
+         with gr.Column():
+             image_output = gr.Image(interactive=False, label="Output RGB image")
+             xyz_output = gr.Image(interactive=False, label="Output CCM image")
+
+             output_model = gr.Model3D(
+                 label="Output OBJ",
+                 interactive=False,
+             )
+             gr.Markdown("Note: Ensure that the input image is correctly pre-processed onto a grey background; otherwise, the results will be unpredictable.")
+
+     inputs = [
+         processed_image,
+         seed,
+         guidance_scale,
+         step,
+     ]
+     outputs = [
+         image_output,
+         xyz_output,
+         output_model,
+         # output_obj,
+     ]
+
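+     # Event chain: validate the upload, then preprocess it, and only on success
+     # run the GPU generation step.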
+     text_button.click(fn=check_input_image, inputs=[image_input]).success(
+         fn=preprocess_image,
+         inputs=[image_input, background_choice, foreground_ratio, background_color],
+         outputs=[processed_image],
+     ).success(
+         fn=gen_image,
+         inputs=inputs,
+         outputs=outputs,
+     )
+ demo.queue().launch()