from upcunet_v3 import RealWaifuUpScaler
import gradio as gr
import time
import logging
import os
from PIL import ImageOps
import numpy as np
import math


def greet(input_img, input_model_name, input_tile_mode):
    # Optional pre-resize for very large inputs (kept disabled):
    # if input_img.size[0] * input_img.size[1] > 256 * 256:
    #     y = int(math.sqrt(256*256/input_img.size[0]*input_img.size[1]))
    #     x = int(input_img.size[0]/input_img.size[1]*y)
    #     input_img = ImageOps.fit(input_img, (x, y))
    input_img = np.array(input_img)  # PIL image -> numpy array expected by the upscaler
    if input_model_name not in model_cache:
        t1 = time.time()
        # input_model_name[2] is the scale digit in weight names like "up4x-latest-conservative.pth"
        upscaler = RealWaifuUpScaler(input_model_name[2], ModelPath + input_model_name, half=False, device="cpu")
        t2 = time.time()
        logger.info(f'load model time, {t2 - t1}')
        model_cache[input_model_name] = upscaler
    else:
        upscaler = model_cache[input_model_name]
        logger.info('load model from cache')

    start = time.time()
    result = upscaler(input_img, tile_mode=input_tile_mode)
    end = time.time()
    logger.info(f'input_model_name, {input_model_name}')
    logger.info(f'input_tile_mode, {input_tile_mode}')
    logger.info(f'input shape, {input_img.shape}')
    logger.info(f'output shape, {result.shape}')
    logger.info(f'inference time, {end - start}')
    return result
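

# A standalone usage sketch (not part of the original app): it shows how the same upscaler
# can be driven without the Gradio UI, reusing only the RealWaifuUpScaler constructor and
# call pattern from greet(). The default weight name matches the dropdown default below;
# the image path argument is illustrative.
def upscale_file(img_path, model_name="up4x-latest-conservative.pth", model_dir="weights_v3/", tile_mode=2):
    from PIL import Image  # Pillow is already a dependency (ImageOps is imported above)
    scaler = RealWaifuUpScaler(model_name[2], model_dir + model_name, half=False, device="cpu")
    img = np.array(Image.open(img_path).convert("RGB"))
    # Larger tile_mode values use less memory but run slower.
    return scaler(img, tile_mode=tile_mode)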


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s")
    logger = logging.getLogger()

    ModelPath = "weights_v3/"
    model_cache = {}  # loaded upscalers keyed by weight file name, so repeat requests reuse them

    
    input_model_name = gr.inputs.Dropdown(os.listdir(ModelPath), default="up4x-latest-conservative.pth", label='Select upscale factor and denoise mode')
    input_tile_mode = gr.inputs.Dropdown([0, 1, 2, 3, 4], default=2, label='Tile mode (the image is processed in tiles)')
    input_img = gr.inputs.Image(label='长安棉花糖♣ ♣ Click below to upload an image (no larger than 2 MB)', type='pil')

    
    inputs = [input_img, input_model_name, input_tile_mode]
    outputs = "image"
    iface = gr.Interface(fn=greet,
                         inputs=inputs,
                         outputs=outputs,
                         allow_screenshot=False,
                         allow_flagging='never',
                         examples=[['efwe0.jpg']],
                         article='♣ 棉花糖 AI team student edition ♣<br>'
                                 'Thanks to the Real-CUGAN project open-sourced on Bilibili. Since this runs on a free CPU with limited memory, an oversized upload will run out of memory, so please try not to upload images larger than 2 MB!<br>'
                                 'If upscaling fails or the 16 GB of memory fills up, please promptly notify the group admins, Xinghe (星河) or Teacher 17, so the program can be restarted.<br>'
                                 'Upscaling mode guide:<br>'
                                 '1. Denoise: recommended when the source is noisy or badly compressed; the 2x models currently support 3 denoise levels.<br>'
                                 '2. No-denoise: recommended when the source has little noise and acceptable compression, but you want higher resolution/sharpness or general enhancement and restoration.<br>'
                                 '3. Conservative: recommended if you worry about losing texture, changing the art style, or over-enhanced colors; in short, if you worry the AI will leave heavy processing marks.<br>'
                                 '4. The larger the tile mode, the less VRAM is used, but the slower the inference.')
    iface.launch()