update.

Files changed:
- .gitattributes  +0 -31
- app.py  +1 -1
- app_2.py  +306 -0
.gitattributes  (DELETED)

@@ -1,31 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
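Note: this file held the Git LFS routing rules for the Space. With it deleted, files matching these patterns (model checkpoints, archives, serialized arrays) are no longer filtered through LFS, so any such file committed afterwards would be stored directly in the Git repository.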
app.py  (CHANGED)

@@ -276,7 +276,7 @@ demo = gr.Interface(
         gr.Number(value=2, label="Rescaling_Factor (up to 4)"),
         gr.Slider(0, 1, value=0.5, step=0.01, label='Codeformer_Fidelity (0 for better quality, 1 for better identity)')
     ], [
-        gr.Image(type="numpy", label="Output")
+        gr.Image(type="numpy", label="Output").style(height='auto')
     ],
     title=title,
     description=description,
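The one functional change in app.py: the output image component now chains `.style(height='auto')` so the result pane sizes itself to the restored image. For context, a minimal sketch of the same pattern in isolation; `.style()` is the Gradio 3.x styling hook (it was removed in Gradio 4, where sizing moved into constructor keywords), and `identity` here is a hypothetical stand-in for the real restoration function:

```python
import gradio as gr

def identity(img):
    # hypothetical stand-in for the CodeFormer inference pipeline
    return img

demo = gr.Interface(
    fn=identity,
    inputs=[gr.Image(type="numpy", label="Input")],
    # Gradio 3.x per-component styling; height='auto' lets the output pane
    # track the image size instead of using a fixed height.
    outputs=[gr.Image(type="numpy", label="Output").style(height='auto')],
)

if __name__ == '__main__':
    demo.launch()
```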
    	
app_2.py  (ADDED)

@@ -0,0 +1,306 @@
"""
This file is used for deploying the Hugging Face demo:
https://huggingface.co/spaces/sczhou/CodeFormer
"""

import sys
sys.path.append('CodeFormer')
import os
import cv2
import torch
import torch.nn.functional as F
import gradio as gr
from itertools import chain

from torchvision.transforms.functional import normalize

from basicsr.utils import imwrite, img2tensor, tensor2img
from basicsr.utils.download_util import load_file_from_url
from facelib.utils.face_restoration_helper import FaceRestoreHelper
from facelib.utils.misc import is_gray
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.realesrgan_utils import RealESRGANer

from basicsr.utils.registry import ARCH_REGISTRY


os.system("pip freeze")
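# ("pip freeze" above only prints the installed package versions, which end
# up in the Space's runtime logs and help when debugging dependency issues.)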

pretrain_model_url = {
    'codeformer': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth',
    'detection': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/detection_Resnet50_Final.pth',
    'parsing': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth',
    'realesrgan': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth'
}
# download weights
if not os.path.exists('CodeFormer/weights/CodeFormer/codeformer.pth'):
    load_file_from_url(url=pretrain_model_url['codeformer'], model_dir='CodeFormer/weights/CodeFormer', progress=True, file_name=None)
if not os.path.exists('CodeFormer/weights/facelib/detection_Resnet50_Final.pth'):
    load_file_from_url(url=pretrain_model_url['detection'], model_dir='CodeFormer/weights/facelib', progress=True, file_name=None)
if not os.path.exists('CodeFormer/weights/facelib/parsing_parsenet.pth'):
    load_file_from_url(url=pretrain_model_url['parsing'], model_dir='CodeFormer/weights/facelib', progress=True, file_name=None)
if not os.path.exists('CodeFormer/weights/realesrgan/RealESRGAN_x2plus.pth'):
    load_file_from_url(url=pretrain_model_url['realesrgan'], model_dir='CodeFormer/weights/realesrgan', progress=True, file_name=None)

# download images
torch.hub.download_url_to_file(
    'https://replicate.com/api/models/sczhou/codeformer/files/fa3fe3d1-76b0-4ca8-ac0d-0a925cb0ff54/06.png',
    '01.png')
torch.hub.download_url_to_file(
    'https://replicate.com/api/models/sczhou/codeformer/files/a1daba8e-af14-4b00-86a4-69cec9619b53/04.jpg',
    '02.jpg')
torch.hub.download_url_to_file(
    'https://replicate.com/api/models/sczhou/codeformer/files/542d64f9-1712-4de7-85f7-3863009a7c3d/03.jpg',
    '03.jpg')
torch.hub.download_url_to_file(
    'https://replicate.com/api/models/sczhou/codeformer/files/a11098b0-a18a-4c02-a19a-9a7045d68426/010.jpg',
    '04.jpg')
torch.hub.download_url_to_file(
    'https://replicate.com/api/models/sczhou/codeformer/files/7cf19c2c-e0cf-4712-9af8-cf5bdbb8d0ee/012.jpg',
    '05.jpg')

def imread(img_path):
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img

# set enhancer with RealESRGAN
def set_realesrgan():
    half = torch.cuda.is_available()  # fp16 inference only when a GPU is present
    model = RRDBNet(
        num_in_ch=3,
        num_out_ch=3,
        num_feat=64,
        num_block=23,
        num_grow_ch=32,
        scale=2,
    )
    upsampler = RealESRGANer(
        scale=2,
        model_path="CodeFormer/weights/realesrgan/RealESRGAN_x2plus.pth",
        model=model,
        tile=400,
        tile_pad=40,
        pre_pad=0,
        half=half,
    )
    return upsampler
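# Note: tile=400 / tile_pad=40 make Real-ESRGAN process large inputs in
# overlapping 400-px tiles, which bounds GPU memory use at some cost in speed.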

upsampler = set_realesrgan()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
codeformer_net = ARCH_REGISTRY.get("CodeFormer")(
    dim_embd=512,
    codebook_size=1024,
    n_head=8,
    n_layers=9,
    connect_list=["32", "64", "128", "256"],
).to(device)
ckpt_path = "CodeFormer/weights/CodeFormer/codeformer.pth"
checkpoint = torch.load(ckpt_path)["params_ema"]
codeformer_net.load_state_dict(checkpoint)
codeformer_net.eval()

os.makedirs('output', exist_ok=True)
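# ("params_ema" selects the exponential-moving-average copy of the weights
# from the BasicSR-style checkpoint, the variant typically used at inference.)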

def inference(image, background_enhance, face_upsample, upscale, codeformer_fidelity):
    """Run a single prediction on the model"""
    try: # global try
        # take the default setting for the demo
        has_aligned = False
        only_center_face = False
        draw_box = False
        detection_model = "retinaface_resnet50"
        print('Inp:', image, background_enhance, face_upsample, upscale, codeformer_fidelity)

        if background_enhance is None: background_enhance = True
        if face_upsample is None: face_upsample = True
        if upscale is None: upscale = 2

        img = cv2.imread(str(image), cv2.IMREAD_COLOR)
        print('\timage size:', img.shape)

        upscale = int(upscale) # convert type to int
        if upscale > 4: # avoid memory exceeded due to too large upscale
            upscale = 4
        if upscale > 2 and max(img.shape[:2]) > 1000: # avoid memory exceeded due to too large img resolution
            upscale = 2
        if max(img.shape[:2]) > 1500: # avoid memory exceeded due to too large img resolution
            upscale = 1
            background_enhance = False
            face_upsample = False

        face_helper = FaceRestoreHelper(
            upscale,
            face_size=512,
            crop_ratio=(1, 1),
            det_model=detection_model,
            save_ext="png",
            use_parse=True,
            device=device,
        )
        bg_upsampler = upsampler if background_enhance else None
        face_upsampler = upsampler if face_upsample else None

        if has_aligned:
            # the input faces are already cropped and aligned
            img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
            face_helper.is_gray = is_gray(img, threshold=5)
            if face_helper.is_gray:
                print('\tgrayscale input: True')
            face_helper.cropped_faces = [img]
        else:
            face_helper.read_image(img)
            # get face landmarks for each face
            num_det_faces = face_helper.get_face_landmarks_5(
                only_center_face=only_center_face, resize=640, eye_dist_threshold=5
            )
            print(f'\tdetect {num_det_faces} faces')
            # align and warp each face
            face_helper.align_warp_face()

        # face restoration for each cropped face
        for idx, cropped_face in enumerate(face_helper.cropped_faces):
            # prepare data
            cropped_face_t = img2tensor(
                cropped_face / 255.0, bgr2rgb=True, float32=True
            )
            normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
            cropped_face_t = cropped_face_t.unsqueeze(0).to(device)

            try:
                with torch.no_grad():
                    output = codeformer_net(
                        cropped_face_t, w=codeformer_fidelity, adain=True
                    )[0]
                    restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
                del output
                torch.cuda.empty_cache()
            except RuntimeError as error:
                print(f"Failed inference for CodeFormer: {error}")
                restored_face = tensor2img(
                    cropped_face_t, rgb2bgr=True, min_max=(-1, 1)
                )

            restored_face = restored_face.astype("uint8")
            face_helper.add_restored_face(restored_face)

        # paste_back
        if not has_aligned:
            # upsample the background
            if bg_upsampler is not None:
                # Now only support RealESRGAN for upsampling background
                bg_img = bg_upsampler.enhance(img, outscale=upscale)[0]
            else:
                bg_img = None
            face_helper.get_inverse_affine(None)
            # paste each restored face to the input image
            if face_upsample and face_upsampler is not None:
                restored_img = face_helper.paste_faces_to_input_image(
                    upsample_img=bg_img,
                    draw_box=draw_box,
                    face_upsampler=face_upsampler,
                )
            else:
                restored_img = face_helper.paste_faces_to_input_image(
                    upsample_img=bg_img, draw_box=draw_box
                )

        # save restored img
        save_path = 'output/out.png'
        imwrite(restored_img, str(save_path))

        restored_img = cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB)
        return restored_img
    except Exception as error:
        print('Global exception', error)
        return None  # the UI has a single output component, so return one value


title = "CodeFormer: Robust Face Restoration and Enhancement Network"

description = r"""<center><img src='https://user-images.githubusercontent.com/14334509/189166076-94bb2cac-4f4e-40fb-a69f-66709e3d98f5.png' alt='CodeFormer logo'></center>
<br>
<b>Official Gradio demo</b> for <a href='https://github.com/sczhou/CodeFormer' target='_blank'><b>Towards Robust Blind Face Restoration with Codebook Lookup Transformer (NeurIPS 2022)</b></a><br>
🔥 CodeFormer is a robust face restoration algorithm for old photos or AI-generated faces.<br>
🤗 Try CodeFormer for improved stable-diffusion generation!<br>
"""

article = r"""
If CodeFormer is helpful, please help to ⭐ the <a href='https://github.com/sczhou/CodeFormer' target='_blank'>Github Repo</a>. Thanks!
[](https://github.com/sczhou/CodeFormer)

---

📝 **Citation**

If our work is useful for your research, please consider citing:
```bibtex
@inproceedings{zhou2022codeformer,
    author = {Zhou, Shangchen and Chan, Kelvin C.K. and Li, Chongyi and Loy, Chen Change},
    title = {Towards Robust Blind Face Restoration with Codebook Lookup TransFormer},
    booktitle = {NeurIPS},
    year = {2022}
}
```

📋 **License**

This project is licensed under <a rel="license" href="https://github.com/sczhou/CodeFormer/blob/master/LICENSE">S-Lab License 1.0</a>.
Redistribution and use for non-commercial purposes should follow this license.

📧 **Contact**

If you have any questions, please feel free to reach out to me at <b>shangchenzhou@gmail.com</b>.

🤗 **Find Me:**
<style type="text/css">
td {
    padding-right: 0px !important;
}
</style>

<table>
<tr>
    <td><a href="https://github.com/sczhou"><img style="margin:-0.8em 0 2em 0" src="https://img.shields.io/github/followers/sczhou?style=social" alt="Github Follow"></a></td>
    <td><a href="https://twitter.com/ShangchenZhou"><img style="margin:-0.8em 0 2em 0" src="https://img.shields.io/twitter/follow/ShangchenZhou?label=%40ShangchenZhou&style=social" alt="Twitter Follow"></a></td>
</tr>
</table>

<center><img src='https://api.infinitescript.com/badgen/count?name=sczhou/CodeFormer&ltext=Visitors&color=6dc9aa' alt='visitors'></center>
"""

with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Box():
        with gr.Column():
            input_img = gr.Image(type="filepath", label="Input")
            background_enhance = gr.Checkbox(value=True, label="Background_Enhance")
            face_enhance = gr.Checkbox(value=True, label="Face_Upsample")
            upscale_factor = gr.Number(value=2, label="Rescaling_Factor (up to 4)")
            codeformer_fidelity = gr.Slider(0, 1, value=0.5, step=0.01, label='Codeformer_Fidelity (0 for better quality, 1 for better identity)')
            submit = gr.Button('Enhance Image')
        with gr.Column():
            output_img = gr.Image(type="numpy", label="Output").style(height='auto')

    inps = [input_img, background_enhance, face_enhance, upscale_factor, codeformer_fidelity]
    submit.click(fn=inference, inputs=inps, outputs=[output_img])

    ex = gr.Examples([
        ['01.png', True, True, 2, 0.7],
        ['02.jpg', True, True, 2, 0.7],
        ['03.jpg', True, True, 2, 0.7],
        ['04.jpg', True, True, 2, 0.1],
        ['05.jpg', True, True, 2, 0.1]
    ],
        fn=inference,
        inputs=inps,
        outputs=[output_img],
        cache_examples=True)

    gr.Markdown(article)


DEBUG = os.getenv('DEBUG') == '1'
demo.queue(api_open=False, concurrency_count=2, max_size=10)
demo.launch(debug=DEBUG)
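For readers who want to exercise the pipeline outside the Gradio UI, here is a minimal, hypothetical smoke test against the `inference` function above. It assumes the module has already been imported (so the weights were downloaded at import time) and that a local `test.jpg` exists; both names are placeholders, not part of the commit:

```python
import cv2

# Hypothetical driver: argument order matches the inference() signature above.
restored = inference(
    image='test.jpg',         # filepath input, as gr.Image(type="filepath") provides
    background_enhance=True,  # run Real-ESRGAN on the background
    face_upsample=True,       # also upsample the restored faces
    upscale=2,                # clamped to <= 4 inside inference()
    codeformer_fidelity=0.5,  # 0 favors quality, 1 favors identity
)

if restored is not None:
    # inference() returns RGB; OpenCV expects BGR when writing to disk.
    cv2.imwrite('restored.png', cv2.cvtColor(restored, cv2.COLOR_BGR2RGB))
```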