ShaoTengLiu committed on
Commit
8f8fb46
1 Parent(s): 2a1ec0a

add app.py

Files changed (1)
  1. app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
+ #!/usr/bin/env python
+
+ from __future__ import annotations
+
+ import os
+ from subprocess import getoutput
+
+ import gradio as gr
+ import torch
+
+ from app_inference import create_inference_demo
+ from app_training import create_training_demo
+ from app_upload import create_upload_demo
+ from inference import InferencePipeline
+ from trainer import Trainer
+
+ TITLE = '# [Video-P2P](https://video-p2p.github.io/) UI'
+
+ ORIGINAL_SPACE_ID = 'Shaldon/Video-P2P-Demo'
+ SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)
+ GPU_DATA = getoutput('nvidia-smi')
+ SHARED_UI_WARNING = f'''## Attention - Training doesn't work in this shared UI. You can duplicate this Space and use it with a paid private T4 GPU.
+ <center><a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="margin-top:0;margin-bottom:0" src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></center>
+ '''
+
+ if os.getenv('SYSTEM') == 'spaces' and SPACE_ID != ORIGINAL_SPACE_ID:
+     SETTINGS = f'<a href="https://huggingface.co/spaces/{SPACE_ID}/settings">Settings</a>'
+ else:
+     SETTINGS = 'Settings'
+
+ INVALID_GPU_WARNING = '''## Attention - the specified GPU is invalid. Training may not work. Make sure you have selected a `T4 GPU` for this task.'''
+
+ CUDA_NOT_AVAILABLE_WARNING = f'''## Attention - Running on CPU.
+ <center>
+ You can assign a GPU in the {SETTINGS} tab if you are running this on HF Spaces.
+ You can use "T4 small/medium" to run this demo.
+ </center>
+ '''
+
+ HF_TOKEN_NOT_SPECIFIED_WARNING = f'''The environment variable `HF_TOKEN` is not specified. Feel free to specify your Hugging Face token with write permission if you don't want to manually provide it for every run.
+ <center>
+ You can check and create your Hugging Face tokens <a href="https://huggingface.co/settings/tokens" target="_blank">here</a>.
+ You can specify environment variables in the "Repository secrets" section of the {SETTINGS} tab.
+ </center>
+ '''
+
+ HF_TOKEN = os.getenv('HF_TOKEN')
+
+
+ def show_warning(warning_text: str) -> gr.Blocks:
+     with gr.Blocks() as demo:
+         with gr.Box():
+             gr.Markdown(warning_text)
+     return demo
+
+
+ pipe = InferencePipeline(HF_TOKEN)
+ trainer = Trainer(HF_TOKEN)
+
+ with gr.Blocks(css='style.css') as demo:
+     if SPACE_ID == ORIGINAL_SPACE_ID:
+         show_warning(SHARED_UI_WARNING)
+     elif not torch.cuda.is_available():
+         show_warning(CUDA_NOT_AVAILABLE_WARNING)
+     elif 'T4' not in GPU_DATA:
+         show_warning(INVALID_GPU_WARNING)
+
+     gr.Markdown(TITLE)
+     with gr.Tabs():
+         with gr.TabItem('Train'):
+             create_training_demo(trainer, pipe)
+         with gr.TabItem('Run'):
+             create_inference_demo(pipe, HF_TOKEN)
+         with gr.TabItem('Upload'):
+             gr.Markdown('''
+             - You can use this tab to upload models later if you chose not to upload them at training time, or if the upload failed during training.
+             ''')
+             create_upload_demo(HF_TOKEN)
+
+     if not HF_TOKEN:
+         show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING)
+
+ demo.queue(max_size=1).launch(share=False)
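
For readers who only see this file in the diff, the sketch below restates the startup warning selection that `app.py` performs at the top of its `gr.Blocks` context. The `pick_startup_warning` helper is hypothetical and added here purely for illustration; the strings it receives correspond to the warning constants defined in the committed file.

```python
# Hypothetical helper, not part of the committed app.py: it restates the
# elif chain at the top of the `gr.Blocks` context as a single function.
from __future__ import annotations

import torch


def pick_startup_warning(space_id: str,
                         original_space_id: str,
                         gpu_data: str,
                         shared_ui_warning: str,
                         cuda_not_available_warning: str,
                         invalid_gpu_warning: str) -> str | None:
    """Return the warning Markdown to display at startup, or None."""
    if space_id == original_space_id:
        # Running in the original shared Space, where training is disabled.
        return shared_ui_warning
    if not torch.cuda.is_available():
        # A duplicated Space with no GPU assigned yet.
        return cuda_not_available_warning
    if 'T4' not in gpu_data:
        # A GPU is attached, but `nvidia-smi` does not report a T4.
        return invalid_gpu_warning
    return None
```

The order mirrors the committed code: the shared-Space check comes first because training is never available there regardless of hardware, and the CUDA check precedes the T4 check because the `nvidia-smi` output is only meaningful when a GPU is actually attached.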