Fucius committed on
Commit
b1b004e
1 Parent(s): ce6a1ec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -8
app.py CHANGED
@@ -65,12 +65,16 @@ STYLE_NAMES = list(styles.keys())
65
  MAX_SEED = np.iinfo(np.int32).max
66
 
67
  title = r"""
68
- <h1 align="center">OMG: Occlusion-friendly Personalized Multi-concept Generation In Diffusion Models (OMG + InstantID)</h1>
69
  """
70
 
71
  description = r"""
72
- <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/kongzhecn/OMG/' target='_blank'><b>OMG: Occlusion-friendly Personalized Multi-concept Generation In Diffusion Models</b></a>.<be>.<br>
73
- <a href='https://kongzhecn.github.io/omg-project/' target='_blank'><b>[Project]</b></a>.<a href='https://github.com/kongzhecn/OMG/' target='_blank'><b>[Code]</b></a>.<a href='https://arxiv.org/abs/2403.10983/' target='_blank'><b>[Arxiv]</b></a>.<br>
 
 
 
 
74
  How to use:<br>
75
  1. Select two characters.
76
  2. Enter a text prompt as done in normal text-to-image models.
@@ -84,11 +88,11 @@ article = r"""
84
  <br>
85
  If our work is helpful for your research or applications, please cite us via:
86
  ```bibtex
87
- @article{,
88
- title={OMG: Occlusion-friendly Personalized Multi-concept Generation In Diffusion Models},
89
- author={},
90
- journal={},
91
- year={}
92
  }
93
  ```
94
  """
@@ -654,6 +658,7 @@ def main(device, segment_type):
654
  inputs=[prompt, negative_prompt, reference_1, reference_2, resolution, local_prompt1, local_prompt2, seed, style, identitynet_strength_ratio, adapter_strength_ratio, condition, condition_img1, controlnet_ratio, cfg_ratio],
655
  outputs=[gallery, gallery1]
656
  )
 
657
  demo.launch(share=True)
658
 
659
  def parse_args():
 
65
  MAX_SEED = np.iinfo(np.int32).max
66
 
67
  title = r"""
68
+ <h1 align="center"> OMG + InstantID </h1>
69
  """
70
 
71
  description = r"""
72
+ <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/kongzhecn/OMG/' target='_blank'><b>OMG: Occlusion-friendly Personalized Multi-concept Generation In Diffusion Models</b></a>.<br>
73
+ <br>
74
+ <a href='https://kongzhecn.github.io/omg-project/' target='_blank'><b>[Project]</b></a><a href='https://github.com/kongzhecn/OMG/' target='_blank'><b>[Code]</b></a><a href='https://arxiv.org/abs/2403.10983/' target='_blank'><b>[Arxiv]</b></a><br>
75
+ <br>
76
+ ❗️<b>Related demos</b>:<a href='https://huggingface.co/spaces/Fucius/OMG/' target='_blank'><b> OMG + LoRAs </b></a>❗️<br>
77
+ <br>
78
  How to use:<br>
79
  1. Select two characters.
80
  2. Enter a text prompt as done in normal text-to-image models.
 
88
  <br>
89
  If our work is helpful for your research or applications, please cite us via:
90
  ```bibtex
91
+ @article{kong2024omg,
92
+ title={OMG: Occlusion-friendly Personalized Multi-concept Generation in Diffusion Models},
93
+ author={Kong, Zhe and Zhang, Yong and Yang, Tianyu and Wang, Tao and Zhang, Kaihao and Wu, Bizhu and Chen, Guanying and Liu, Wei and Luo, Wenhan},
94
+ journal={arXiv preprint arXiv:2403.10983},
95
+ year={2024}
96
  }
97
  ```
98
  """
 
658
  inputs=[prompt, negative_prompt, reference_1, reference_2, resolution, local_prompt1, local_prompt2, seed, style, identitynet_strength_ratio, adapter_strength_ratio, condition, condition_img1, controlnet_ratio, cfg_ratio],
659
  outputs=[gallery, gallery1]
660
  )
661
+ gr.Markdown(article)
662
  demo.launch(share=True)
663
 
664
  def parse_args():