import{s as Se,o as Ie,n as Le}from"../chunks/scheduler.8c3d61f6.js";import{S as Ge,i as Ve,g as d,s as n,r as p,A as Ne,h as f,f as s,c as i,j as F,u,x as $,k as C,y as t,a as m,v as _,d as h,t as g,w as A}from"../chunks/index.da70eac4.js";import{T as Ce}from"../chunks/Tip.1d9b8c37.js";import{D as Z}from"../chunks/Docstring.6b390b9a.js";import{H as ke,E as ze}from"../chunks/EditOnGithub.1e64e623.js";function He(X){let o,P="This API is 🧪 experimental.";return{c(){o=d("p"),o.textContent=P},l(c){o=f(c,"P",{"data-svelte-h":!0}),$(o)!=="svelte-89q1io"&&(o.textContent=P)},m(c,T){m(c,o,T)},p:Le,d(c){c&&s(o)}}}function je(X){let o,P="This API is 🧪 experimental.";return{c(){o=d("p"),o.textContent=P},l(c){o=f(c,"P",{"data-svelte-h":!0}),$(o)!=="svelte-89q1io"&&(o.textContent=P)},m(c,T){m(c,o,T)},p:Le,d(c){c&&s(o)}}}function Oe(X){let o,P,c,T,k,te,L,ve='A Transformer model for image-like data from <a href="https://huggingface.co/papers/2310.00426" rel="nofollow">PixArt-Alpha</a> and <a href="https://huggingface.co/papers/2403.04692" rel="nofollow">PixArt-Sigma</a>.',oe,S,se,a,I,de,q,$e=`A 2D Transformer model as introduced in PixArt family of models (<a href="https://arxiv.org/abs/2310.00426" rel="nofollow">https://arxiv.org/abs/2310.00426</a>,
<a href="https://arxiv.org/abs/2403.04692" rel="nofollow">https://arxiv.org/abs/2403.04692</a>).`,fe,y,G,ce,E,Te='The <a href="/docs/diffusers/v0.32.0/en/api/models/pixart_transformer2d#diffusers.PixArtTransformer2DModel">PixArtTransformer2DModel</a> forward method.',le,x,V,me,K,ye=`Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.`,pe,D,ue,w,N,_e,U,De="Sets the attention processor to use to compute attention.",he,b,z,ge,J,we="Disables custom attention processors and sets the default attention implementation.",Ae,R,Me="Safe to just use <code>AttnProcessor()</code> as PixArt doesn’t have any exotic attention processors in default model.",Pe,v,H,xe,W,Fe="Disables the fused QKV projection if enabled.",be,M,re,j,ne,ee,ie;return k=new ke({props:{title:"PixArtTransformer2DModel",local:"pixarttransformer2dmodel",headingTag:"h1"}}),S=new ke({props:{title:"PixArtTransformer2DModel",local:"diffusers.PixArtTransformer2DModel",headingTag:"h2"}}),I=new Z({props:{name:"class diffusers.PixArtTransformer2DModel",anchor:"diffusers.PixArtTransformer2DModel",parameters:[{name:"num_attention_heads",val:": int = 16"},{name:"attention_head_dim",val:": int = 72"},{name:"in_channels",val:": int = 4"},{name:"out_channels",val:": typing.Optional[int] = 8"},{name:"num_layers",val:": int = 28"},{name:"dropout",val:": float = 0.0"},{name:"norm_num_groups",val:": int = 32"},{name:"cross_attention_dim",val:": typing.Optional[int] = 1152"},{name:"attention_bias",val:": bool = True"},{name:"sample_size",val:": int = 128"},{name:"patch_size",val:": int = 2"},{name:"activation_fn",val:": str = 'gelu-approximate'"},{name:"num_embeds_ada_norm",val:": typing.Optional[int] = 1000"},{name:"upcast_attention",val:": bool = False"},{name:"norm_type",val:": str = 'ada_norm_single'"},{name:"norm_elementwise_affine",val:": bool = False"},{name:"norm_eps",val:": float = 1e-06"},{name:"interpolation_scale",val:": typing.Optional[int] = None"},{name:"use_additional_conditions",val:": typing.Optional[bool] = None"},{name:"caption_channels",val:": typing.Optional[int] = None"},{name:"attention_type",val:": typing.Optional[str] = 'default'"}],parametersDescription:[{anchor:"diffusers.PixArtTransformer2DModel.num_attention_heads",description:"<strong>num_attention_heads</strong> (int, optional, defaults to 16) &#x2014; The number of heads to use for multi-head attention.",name:"num_attention_heads"},{anchor:"diffusers.PixArtTransformer2DModel.attention_head_dim",description:"<strong>attention_head_dim</strong> (int, optional, defaults to 72) &#x2014; The number of channels in each head.",name:"attention_head_dim"},{anchor:"diffusers.PixArtTransformer2DModel.in_channels",description:"<strong>in_channels</strong> (int, defaults to 4) &#x2014; The number of channels in the input.",name:"in_channels"},{anchor:"diffusers.PixArtTransformer2DModel.out_channels",description:`<strong>out_channels</strong> (int, optional) &#x2014;
The number of channels in the output. Specify this parameter if the output channel number differs from the
input.`,name:"out_channels"},{anchor:"diffusers.PixArtTransformer2DModel.num_layers",description:"<strong>num_layers</strong> (int, optional, defaults to 28) &#x2014; The number of layers of Transformer blocks to use.",name:"num_layers"},{anchor:"diffusers.PixArtTransformer2DModel.dropout",description:"<strong>dropout</strong> (float, optional, defaults to 0.0) &#x2014; The dropout probability to use within the Transformer blocks.",name:"dropout"},{anchor:"diffusers.PixArtTransformer2DModel.norm_num_groups",description:`<strong>norm_num_groups</strong> (int, optional, defaults to 32) &#x2014;
Number of groups for group normalization within Transformer blocks.`,name:"norm_num_groups"},{anchor:"diffusers.PixArtTransformer2DModel.cross_attention_dim",description:`<strong>cross_attention_dim</strong> (int, optional) &#x2014;
The dimensionality for cross-attention layers, typically matching the encoder&#x2019;s hidden dimension.`,name:"cross_attention_dim"},{anchor:"diffusers.PixArtTransformer2DModel.attention_bias",description:`<strong>attention_bias</strong> (bool, optional, defaults to True) &#x2014;
Configure if the Transformer blocks&#x2019; attention should contain a bias parameter.`,name:"attention_bias"},{anchor:"diffusers.PixArtTransformer2DModel.sample_size",description:`<strong>sample_size</strong> (int, defaults to 128) &#x2014;
The width of the latent images. This parameter is fixed during training.`,name:"sample_size"},{anchor:"diffusers.PixArtTransformer2DModel.patch_size",description:`<strong>patch_size</strong> (int, defaults to 2) &#x2014;
Size of the patches the model processes, relevant for architectures working on non-sequential data.`,name:"patch_size"},{anchor:"diffusers.PixArtTransformer2DModel.activation_fn",description:`<strong>activation_fn</strong> (str, optional, defaults to &#x201C;gelu-approximate&#x201D;) &#x2014;
Activation function to use in feed-forward networks within Transformer blocks.`,name:"activation_fn"},{anchor:"diffusers.PixArtTransformer2DModel.num_embeds_ada_norm",description:`<strong>num_embeds_ada_norm</strong> (int, optional, defaults to 1000) &#x2014;
Number of embeddings for AdaLayerNorm, fixed during training and affects the maximum denoising steps during
inference.`,name:"num_embeds_ada_norm"},{anchor:"diffusers.PixArtTransformer2DModel.upcast_attention",description:`<strong>upcast_attention</strong> (bool, optional, defaults to False) &#x2014;
If true, upcasts the attention mechanism dimensions for potentially improved performance.`,name:"upcast_attention"},{anchor:"diffusers.PixArtTransformer2DModel.norm_type",description:`<strong>norm_type</strong> (str, optional, defaults to &#x201C;ada_norm_zero&#x201D;) &#x2014;
Specifies the type of normalization used, can be &#x2018;ada_norm_zero&#x2019;.`,name:"norm_type"},{anchor:"diffusers.PixArtTransformer2DModel.norm_elementwise_affine",description:`<strong>norm_elementwise_affine</strong> (bool, optional, defaults to False) &#x2014;
If true, enables element-wise affine parameters in the normalization layers.`,name:"norm_elementwise_affine"},{anchor:"diffusers.PixArtTransformer2DModel.norm_eps",description:`<strong>norm_eps</strong> (float, optional, defaults to 1e-6) &#x2014;
A small constant added to the denominator in normalization layers to prevent division by zero.`,name:"norm_eps"},{anchor:"diffusers.PixArtTransformer2DModel.interpolation_scale",description:"<strong>interpolation_scale</strong> (int, optional) &#x2014; Scale factor to use during interpolating the position embeddings.",name:"interpolation_scale"},{anchor:"diffusers.PixArtTransformer2DModel.use_additional_conditions",description:"<strong>use_additional_conditions</strong> (bool, optional) &#x2014; If we&#x2019;re using additional conditions as inputs.",name:"use_additional_conditions"},{anchor:"diffusers.PixArtTransformer2DModel.attention_type",description:"<strong>attention_type</strong> (str, optional, defaults to &#x201C;default&#x201D;) &#x2014; Kind of attention mechanism to be used.",name:"attention_type"},{anchor:"diffusers.PixArtTransformer2DModel.caption_channels",description:`<strong>caption_channels</strong> (int, optional, defaults to None) &#x2014;
Number of channels to use for projecting the caption embeddings.`,name:"caption_channels"},{anchor:"diffusers.PixArtTransformer2DModel.use_linear_projection",description:`<strong>use_linear_projection</strong> (bool, optional, defaults to False) &#x2014;
Deprecated argument. Will be removed in a future version.`,name:"use_linear_projection"},{anchor:"diffusers.PixArtTransformer2DModel.num_vector_embeds",description:`<strong>num_vector_embeds</strong> (bool, optional, defaults to False) &#x2014;
Deprecated argument. Will be removed in a future version.`,name:"num_vector_embeds"}],source:"https://github.com/huggingface/diffusers/blob/v0.32.0/src/diffusers/models/transformers/pixart_transformer_2d.py#L32"}}),G=new Z({props:{name:"forward",anchor:"diffusers.PixArtTransformer2DModel.forward",parameters:[{name:"hidden_states",val:": Tensor"},{name:"encoder_hidden_states",val:": typing.Optional[torch.Tensor] = None"},{name:"timestep",val:": typing.Optional[torch.LongTensor] = None"},{name:"added_cond_kwargs",val:": typing.Dict[str, torch.Tensor] = None"},{name:"cross_attention_kwargs",val:": typing.Dict[str, typing.Any] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"return_dict",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.PixArtTransformer2DModel.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch size, channel, height, width)</code>) &#x2014;
Input <code>hidden_states</code>.`,name:"hidden_states"},{anchor:"diffusers.PixArtTransformer2DModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch size, sequence len, embed dims)</code>, <em>optional</em>) &#x2014;
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.`,name:"encoder_hidden_states"},{anchor:"diffusers.PixArtTransformer2DModel.forward.timestep",description:`<strong>timestep</strong> (<code>torch.LongTensor</code>, <em>optional</em>) &#x2014;
Used to indicate denoising step. Optional timestep to be applied as an embedding in <code>AdaLayerNorm</code>.`,name:"timestep"},{anchor:"diffusers.PixArtTransformer2DModel.forward.added_cond_kwargs",description:"<strong>added_cond_kwargs</strong> &#x2014; (<code>Dict[str, Any]</code>, <em>optional</em>): Additional conditions to be used as inputs.",name:"added_cond_kwargs"},{anchor:"diffusers.PixArtTransformer2DModel.forward.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> ( <code>Dict[str, Any]</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.PixArtTransformer2DModel.forward.attention_mask",description:`<strong>attention_mask</strong> ( <code>torch.Tensor</code>, <em>optional</em>) &#x2014;
An attention mask of shape <code>(batch, key_tokens)</code> is applied to <code>encoder_hidden_states</code>. If <code>1</code> the mask
is kept, otherwise if <code>0</code> it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to &#x201C;discard&#x201D; tokens.`,name:"attention_mask"},{anchor:"diffusers.PixArtTransformer2DModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> ( <code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Cross-attention mask applied to <code>encoder_hidden_states</code>. Two formats supported:</p>
<ul>
<li>Mask <code>(batch, sequence_length)</code> True = keep, False = discard.</li>
<li>Bias <code>(batch, 1, sequence_length)</code> 0 = keep, -10000 = discard.</li>
</ul>
<p>If <code>ndim == 2</code>: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.`,name:"encoder_attention_mask"},{anchor:"diffusers.PixArtTransformer2DModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <a href="/docs/diffusers/v0.32.0/en/api/models/unet2d-cond#diffusers.models.unets.unet_2d_condition.UNet2DConditionOutput">UNet2DConditionOutput</a> instead of a plain
tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/diffusers/blob/v0.32.0/src/diffusers/models/transformers/pixart_transformer_2d.py#L298",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>


<p>If <code>return_dict</code> is True, an <code>~models.transformer_2d.Transformer2DModelOutput</code> is returned, otherwise a
<code>tuple</code> where the first element is the sample tensor.</p>
`}}),V=new Z({props:{name:"fuse_qkv_projections",anchor:"diffusers.PixArtTransformer2DModel.fuse_qkv_projections",parameters:[],source:"https://github.com/huggingface/diffusers/blob/v0.32.0/src/diffusers/models/transformers/pixart_transformer_2d.py#L259"}}),D=new Ce({props:{warning:!0,$$slots:{default:[He]},$$scope:{ctx:X}}}),N=new Z({props:{name:"set_attn_processor",anchor:"diffusers.PixArtTransformer2DModel.set_attn_processor",parameters:[{name:"processor",val:": typing.Union[diffusers.models.attention_processor.AttnProcessor, diffusers.models.attention_processor.CustomDiffusionAttnProcessor, diffusers.models.attention_processor.AttnAddedKVProcessor, diffusers.models.attention_processor.AttnAddedKVProcessor2_0, diffusers.models.attention_processor.JointAttnProcessor2_0, diffusers.models.attention_processor.PAGJointAttnProcessor2_0, diffusers.models.attention_processor.PAGCFGJointAttnProcessor2_0, diffusers.models.attention_processor.FusedJointAttnProcessor2_0, diffusers.models.attention_processor.AllegroAttnProcessor2_0, diffusers.models.attention_processor.AuraFlowAttnProcessor2_0, diffusers.models.attention_processor.FusedAuraFlowAttnProcessor2_0, diffusers.models.attention_processor.FluxAttnProcessor2_0, diffusers.models.attention_processor.FluxAttnProcessor2_0_NPU, diffusers.models.attention_processor.FusedFluxAttnProcessor2_0, diffusers.models.attention_processor.FusedFluxAttnProcessor2_0_NPU, diffusers.models.attention_processor.CogVideoXAttnProcessor2_0, diffusers.models.attention_processor.FusedCogVideoXAttnProcessor2_0, diffusers.models.attention_processor.XFormersAttnAddedKVProcessor, diffusers.models.attention_processor.XFormersAttnProcessor, diffusers.models.attention_processor.XLAFlashAttnProcessor2_0, diffusers.models.attention_processor.AttnProcessorNPU, diffusers.models.attention_processor.AttnProcessor2_0, diffusers.models.attention_processor.MochiVaeAttnProcessor2_0, diffusers.models.attention_processor.MochiAttnProcessor2_0, diffusers.models.attention_processor.StableAudioAttnProcessor2_0, diffusers.models.attention_processor.HunyuanAttnProcessor2_0, diffusers.models.attention_processor.FusedHunyuanAttnProcessor2_0, diffusers.models.attention_processor.PAGHunyuanAttnProcessor2_0, diffusers.models.attention_processor.PAGCFGHunyuanAttnProcessor2_0, diffusers.models.attention_processor.LuminaAttnProcessor2_0, diffusers.models.attention_processor.FusedAttnProcessor2_0, diffusers.models.attention_processor.CustomDiffusionXFormersAttnProcessor, diffusers.models.attention_processor.CustomDiffusionAttnProcessor2_0, diffusers.models.attention_processor.SlicedAttnProcessor, diffusers.models.attention_processor.SlicedAttnAddedKVProcessor, diffusers.models.attention_processor.SanaLinearAttnProcessor2_0, diffusers.models.attention_processor.PAGCFGSanaLinearAttnProcessor2_0, diffusers.models.attention_processor.PAGIdentitySanaLinearAttnProcessor2_0, diffusers.models.attention_processor.SanaMultiscaleLinearAttention, diffusers.models.attention_processor.SanaMultiscaleAttnProcessor2_0, diffusers.models.attention_processor.SanaMultiscaleAttentionProjection, diffusers.models.attention_processor.IPAdapterAttnProcessor, diffusers.models.attention_processor.IPAdapterAttnProcessor2_0, diffusers.models.attention_processor.IPAdapterXFormersAttnProcessor, diffusers.models.attention_processor.SD3IPAdapterJointAttnProcessor2_0, diffusers.models.attention_processor.PAGIdentitySelfAttnProcessor2_0, diffusers.models.attention_processor.PAGCFGIdentitySelfAttnProcessor2_0, diffusers.models.attention_processor.LoRAAttnProcessor, diffusers.models.attention_processor.LoRAAttnProcessor2_0, diffusers.models.attention_processor.LoRAXFormersAttnProcessor, diffusers.models.attention_processor.LoRAAttnAddedKVProcessor, typing.Dict[str, typing.Union[diffusers.models.attention_processor.AttnProcessor, diffusers.models.attention_processor.CustomDiffusionAttnProcessor, diffusers.models.attention_processor.AttnAddedKVProcessor, diffusers.models.attention_processor.AttnAddedKVProcessor2_0, diffusers.models.attention_processor.JointAttnProcessor2_0, diffusers.models.attention_processor.PAGJointAttnProcessor2_0, diffusers.models.attention_processor.PAGCFGJointAttnProcessor2_0, diffusers.models.attention_processor.FusedJointAttnProcessor2_0, diffusers.models.attention_processor.AllegroAttnProcessor2_0, diffusers.models.attention_processor.AuraFlowAttnProcessor2_0, diffusers.models.attention_processor.FusedAuraFlowAttnProcessor2_0, diffusers.models.attention_processor.FluxAttnProcessor2_0, diffusers.models.attention_processor.FluxAttnProcessor2_0_NPU, diffusers.models.attention_processor.FusedFluxAttnProcessor2_0, diffusers.models.attention_processor.FusedFluxAttnProcessor2_0_NPU, diffusers.models.attention_processor.CogVideoXAttnProcessor2_0, diffusers.models.attention_processor.FusedCogVideoXAttnProcessor2_0, diffusers.models.attention_processor.XFormersAttnAddedKVProcessor, diffusers.models.attention_processor.XFormersAttnProcessor, diffusers.models.attention_processor.XLAFlashAttnProcessor2_0, diffusers.models.attention_processor.AttnProcessorNPU, diffusers.models.attention_processor.AttnProcessor2_0, diffusers.models.attention_processor.MochiVaeAttnProcessor2_0, diffusers.models.attention_processor.MochiAttnProcessor2_0, diffusers.models.attention_processor.StableAudioAttnProcessor2_0, diffusers.models.attention_processor.HunyuanAttnProcessor2_0, diffusers.models.attention_processor.FusedHunyuanAttnProcessor2_0, diffusers.models.attention_processor.PAGHunyuanAttnProcessor2_0, diffusers.models.attention_processor.PAGCFGHunyuanAttnProcessor2_0, diffusers.models.attention_processor.LuminaAttnProcessor2_0, diffusers.models.attention_processor.FusedAttnProcessor2_0, diffusers.models.attention_processor.CustomDiffusionXFormersAttnProcessor, diffusers.models.attention_processor.CustomDiffusionAttnProcessor2_0, diffusers.models.attention_processor.SlicedAttnProcessor, diffusers.models.attention_processor.SlicedAttnAddedKVProcessor, diffusers.models.attention_processor.SanaLinearAttnProcessor2_0, diffusers.models.attention_processor.PAGCFGSanaLinearAttnProcessor2_0, diffusers.models.attention_processor.PAGIdentitySanaLinearAttnProcessor2_0, diffusers.models.attention_processor.SanaMultiscaleLinearAttention, diffusers.models.attention_processor.SanaMultiscaleAttnProcessor2_0, diffusers.models.attention_processor.SanaMultiscaleAttentionProjection, diffusers.models.attention_processor.IPAdapterAttnProcessor, diffusers.models.attention_processor.IPAdapterAttnProcessor2_0, diffusers.models.attention_processor.IPAdapterXFormersAttnProcessor, diffusers.models.attention_processor.SD3IPAdapterJointAttnProcessor2_0, diffusers.models.attention_processor.PAGIdentitySelfAttnProcessor2_0, diffusers.models.attention_processor.PAGCFGIdentitySelfAttnProcessor2_0, diffusers.models.attention_processor.LoRAAttnProcessor, diffusers.models.attention_processor.LoRAAttnProcessor2_0, diffusers.models.attention_processor.LoRAXFormersAttnProcessor, diffusers.models.attention_processor.LoRAAttnAddedKVProcessor]]]"}],parametersDescription:[{anchor:"diffusers.PixArtTransformer2DModel.set_attn_processor.processor",description:`<strong>processor</strong> (<code>dict</code> of <code>AttentionProcessor</code> or only <code>AttentionProcessor</code>) &#x2014;
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for <strong>all</strong> <code>Attention</code> layers.</p>
<p>If <code>processor</code> is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.`,name:"processor"}],source:"https://github.com/huggingface/diffusers/blob/v0.32.0/src/diffusers/models/transformers/pixart_transformer_2d.py#L216"}}),z=new Z({props:{name:"set_default_attn_processor",anchor:"diffusers.PixArtTransformer2DModel.set_default_attn_processor",parameters:[],source:"https://github.com/huggingface/diffusers/blob/v0.32.0/src/diffusers/models/transformers/pixart_transformer_2d.py#L250"}}),H=new Z({props:{name:"unfuse_qkv_projections",anchor:"diffusers.PixArtTransformer2DModel.unfuse_qkv_projections",parameters:[],source:"https://github.com/huggingface/diffusers/blob/v0.32.0/src/diffusers/models/transformers/pixart_transformer_2d.py#L285"}}),M=new Ce({props:{warning:!0,$$slots:{default:[je]},$$scope:{ctx:X}}}),j=new ze({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/models/pixart_transformer2d.md"}}),{c(){o=d("meta"),P=n(),c=d("p"),T=n(),p(k.$$.fragment),te=n(),L=d("p"),L.innerHTML=ve,oe=n(),p(S.$$.fragment),se=n(),a=d("div"),p(I.$$.fragment),de=n(),q=d("p"),q.innerHTML=$e,fe=n(),y=d("div"),p(G.$$.fragment),ce=n(),E=d("p"),E.innerHTML=Te,le=n(),x=d("div"),p(V.$$.fragment),me=n(),K=d("p"),K.textContent=ye,pe=n(),p(D.$$.fragment),ue=n(),w=d("div"),p(N.$$.fragment),_e=n(),U=d("p"),U.textContent=De,he=n(),b=d("div"),p(z.$$.fragment),ge=n(),J=d("p"),J.textContent=we,Ae=n(),R=d("p"),R.innerHTML=Me,Pe=n(),v=d("div"),p(H.$$.fragment),xe=n(),W=d("p"),W.textContent=Fe,be=n(),p(M.$$.fragment),re=n(),p(j.$$.fragment),ne=n(),ee=d("p"),this.h()},l(e){const r=Ne("svelte-u9bgzb",document.head);o=f(r,"META",{name:!0,content:!0}),r.forEach(s),P=i(e),c=f(e,"P",{}),F(c).forEach(s),T=i(e),u(k.$$.fragment,e),te=i(e),L=f(e,"P",{"data-svelte-h":!0}),$(L)!=="svelte-p8z7gn"&&(L.innerHTML=ve),oe=i(e),u(S.$$.fragment,e),se=i(e),a=f(e,"DIV",{class:!0});var l=F(a);u(I.$$.fragment,l),de=i(l),q=f(l,"P",{"data-svelte-h":!0}),$(q)!=="svelte-1pwq02h"&&(q.innerHTML=$e),fe=i(l),y=f(l,"DIV",{class:!0});var O=F(y);u(G.$$.fragment,O),ce=i(O),E=f(O,"P",{"data-svelte-h":!0}),$(E)!=="svelte-1wdxeg3"&&(E.innerHTML=Te),O.forEach(s),le=i(l),x=f(l,"DIV",{class:!0});var Q=F(x);u(V.$$.fragment,Q),me=i(Q),K=f(Q,"P",{"data-svelte-h":!0}),$(K)!=="svelte-1254b9i"&&(K.textContent=ye),pe=i(Q),u(D.$$.fragment,Q),Q.forEach(s),ue=i(l),w=f(l,"DIV",{class:!0});var ae=F(w);u(N.$$.fragment,ae),_e=i(ae),U=f(ae,"P",{"data-svelte-h":!0}),$(U)!=="svelte-1o77hl2"&&(U.textContent=De),ae.forEach(s),he=i(l),b=f(l,"DIV",{class:!0});var B=F(b);u(z.$$.fragment,B),ge=i(B),J=f(B,"P",{"data-svelte-h":!0}),$(J)!=="svelte-1lxcwhv"&&(J.textContent=we),Ae=i(B),R=f(B,"P",{"data-svelte-h":!0}),$(R)!=="svelte-1vivlhg"&&(R.innerHTML=Me),B.forEach(s),Pe=i(l),v=f(l,"DIV",{class:!0});var Y=F(v);u(H.$$.fragment,Y),xe=i(Y),W=f(Y,"P",{"data-svelte-h":!0}),$(W)!=="svelte-1vhtc74"&&(W.textContent=Fe),be=i(Y),u(M.$$.fragment,Y),Y.forEach(s),l.forEach(s),re=i(e),u(j.$$.fragment,e),ne=i(e),ee=f(e,"P",{}),F(ee).forEach(s),this.h()},h(){C(o,"name","hf:doc:metadata"),C(o,"content",Xe),C(y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(b,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(a,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,r){t(document.head,o),m(e,P,r),m(e,c,r),m(e,T,r),_(k,e,r),m(e,te,r),m(e,L,r),m(e,oe,r),_(S,e,r),m(e,se,r),m(e,a,r),_(I,a,null),t(a,de),t(a,q),t(a,fe),t(a,y),_(G,y,null),t(y,ce),t(y,E),t(a,le),t(a,x),_(V,x,null),t(x,me),t(x,K),t(x,pe),_(D,x,null),t(a,ue),t(a,w),_(N,w,null),t(w,_e),t(w,U),t(a,he),t(a,b),_(z,b,null),t(b,ge),t(b,J),t(b,Ae),t(b,R),t(a,Pe),t(a,v),_(H,v,null),t(v,xe),t(v,W),t(v,be),_(M,v,null),m(e,re,r),_(j,e,r),m(e,ne,r),m(e,ee,r),ie=!0},p(e,[r]){const l={};r&2&&(l.$$scope={dirty:r,ctx:e}),D.$set(l);const O={};r&2&&(O.$$scope={dirty:r,ctx:e}),M.$set(O)},i(e){ie||(h(k.$$.fragment,e),h(S.$$.fragment,e),h(I.$$.fragment,e),h(G.$$.fragment,e),h(V.$$.fragment,e),h(D.$$.fragment,e),h(N.$$.fragment,e),h(z.$$.fragment,e),h(H.$$.fragment,e),h(M.$$.fragment,e),h(j.$$.fragment,e),ie=!0)},o(e){g(k.$$.fragment,e),g(S.$$.fragment,e),g(I.$$.fragment,e),g(G.$$.fragment,e),g(V.$$.fragment,e),g(D.$$.fragment,e),g(N.$$.fragment,e),g(z.$$.fragment,e),g(H.$$.fragment,e),g(M.$$.fragment,e),g(j.$$.fragment,e),ie=!1},d(e){e&&(s(P),s(c),s(T),s(te),s(L),s(oe),s(se),s(a),s(re),s(ne),s(ee)),s(o),A(k,e),A(S,e),A(I),A(G),A(V),A(D),A(N),A(z),A(H),A(M),A(j,e)}}}const Xe='{"title":"PixArtTransformer2DModel","local":"pixarttransformer2dmodel","sections":[{"title":"PixArtTransformer2DModel","local":"diffusers.PixArtTransformer2DModel","sections":[],"depth":2}],"depth":1}';function qe(X){return Ie(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class We extends Ge{constructor(o){super(),Ve(this,o,qe,Oe,Se,{})}}export{We as component};
