huanngzh committed on
Commit
88f0f17
1 Parent(s): d1dae7c
app.py CHANGED
@@ -105,7 +105,9 @@ with gr.Blocks(css=css) as demo:
     gr.Markdown(
         f"""# MV-Adapter [Text-to-Multi-View]
         Generate 768x768 multi-view images using {base_model} <br>
-        [[page](https://huanngzh.github.io/MV-Adapter-Page/)] [[repo](https://github.com/huanngzh/MV-Adapter)] [Tips: please follow the prompt template in [usage-guidelines](https://huggingface.co/cagliostrolab/animagine-xl-3.1#usage-guidelines)]
+        [[page](https://huanngzh.github.io/MV-Adapter-Page/)] [[repo](https://github.com/huanngzh/MV-Adapter)] <br>
+        Also check our other demos: [Text-to-Multiview (General)](https://huggingface.co/spaces/VAST-AI/MV-Adapter-T2MV-SDXL) | [Text-to-Multiview (Anime)](https://huggingface.co/spaces/huanngzh/MV-Adapter-T2MV-Anime) | [Image-to-Multiview](https://huggingface.co/spaces/VAST-AI/MV-Adapter-I2MV-SDXL) <br>
+        **Tips:** please follow the prompt template in [usage-guidelines](https://huggingface.co/cagliostrolab/animagine-xl-3.1#usage-guidelines)
         """
     )
mvadapter/pipelines/pipeline_mvadapter_i2mv_sdxl.py CHANGED
@@ -906,6 +906,14 @@ class MVAdapterI2MVSDXLPipeline(StableDiffusionXLPipeline, CustomAdapterMixin):
                 use_mv=True,
                 use_ref=True,
             ),
+            set_cross_attn_proc_func=lambda name, hs, cad, ap: self_attn_processor(
+                query_dim=hs,
+                inner_dim=hs,
+                num_views=num_views,
+                name=name,
+                use_mv=False,
+                use_ref=False,
+            ),
         )

         # copy decoupled attention weights from original unet
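The new `set_cross_attn_proc_func` lambda builds the same decoupled processor class as the self-attention branch, but with `use_mv=False` and `use_ref=False`, so the cross-attention ("attn2") layers receive a compatible processor while keeping plain per-view attention behavior. For context, here is a minimal sketch of how a per-layer factory like this is typically applied over a diffusers UNet; the helper name, the hidden-size derivation, and the callback signature `(name, hidden_size, cross_attention_dim, old_processor)` are assumptions for illustration, not MV-Adapter's actual implementation:

```python
# Sketch only: walk the UNet's attention modules and let per-layer
# factories decide which processor each self-attn ("attn1") and
# cross-attn ("attn2") layer gets.
from diffusers import UNet2DConditionModel

def set_unet_attn_processors(
    unet: UNet2DConditionModel,
    set_self_attn_proc_func,   # (name, hs, cad, old_proc) -> processor
    set_cross_attn_proc_func,  # same signature, applied to "attn2" layers
):
    procs = {}
    for name, old_proc in unet.attn_processors.items():
        # cross_attention_dim is only meaningful for cross-attn layers
        is_cross = name.endswith("attn2.processor")
        cad = unet.config.cross_attention_dim if is_cross else None
        # Hidden size varies per block; a real implementation derives it
        # from the block index encoded in `name` -- simplified here.
        hs = unet.config.block_out_channels[-1]
        factory = set_cross_attn_proc_func if is_cross else set_self_attn_proc_func
        procs[name] = factory(name, hs, cad, old_proc)
    unet.set_attn_processor(procs)
```

Under this reading, the commit's change means cross-attention layers are no longer left with their original processors but are rebuilt through the same `self_attn_processor` factory, which keeps the processor set uniform across the UNet.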
mvadapter/pipelines/pipeline_mvadapter_t2mv_sdxl.py CHANGED
@@ -747,6 +747,14 @@ class MVAdapterT2MVSDXLPipeline(StableDiffusionXLPipeline, CustomAdapterMixin):
                 use_mv=True,
                 use_ref=False,
             ),
+            set_cross_attn_proc_func=lambda name, hs, cad, ap: self_attn_processor(
+                query_dim=hs,
+                inner_dim=hs,
+                num_views=num_views,
+                name=name,
+                use_mv=False,
+                use_ref=False,
+            ),
         )

         # copy decoupled attention weights from original unet
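For orientation, a hedged end-to-end usage sketch of the patched T2MV pipeline follows. The base-model ID, adapter repo, weight filename, and the exact `init_custom_adapter` / `load_custom_adapter` signatures are assumptions based on the MV-Adapter README, not verified against this commit:

```python
import torch
from mvadapter.pipelines.pipeline_mvadapter_t2mv_sdxl import MVAdapterT2MVSDXLPipeline

# Assumed model/weight identifiers -- check the MV-Adapter repo for the
# canonical values before running.
pipe = MVAdapterT2MVSDXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
).to("cuda")

# CustomAdapterMixin entry point: installs the decoupled attention
# processors patched above (cross-attn layers now included).
pipe.init_custom_adapter(num_views=6)
pipe.load_custom_adapter(
    "huanngzh/mv-adapter", weight_name="mvadapter_t2mv_sdxl.safetensors"
)

images = pipe(
    prompt="an astronaut riding a horse",
    height=768,
    width=768,
    num_images_per_prompt=6,  # one image per view
).images
```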