Column summary (per-row value lengths):

    identifier  string  24 to 102 characters
    embedding   list    2.56k elements (fixed width)
    tokens      list    4 to 448 elements

Each row below gives a source identifier (modeling file plus definition name), the leading values of its embedding vector, and the sorted, deduplicated tokens appearing in that definition, with name prefixes normalized to "Model" (e.g. "ModelAttention" for "EfficientLoFTRAttention").
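The rows read naturally as a Hugging Face `datasets` table. A minimal loading sketch, assuming the preview comes from a hub dataset with these three columns (the repo id below is a placeholder, not from the source):

```python
from datasets import load_dataset

# Hypothetical repo id; substitute the real dataset path.
ds = load_dataset("your-org/modeling-code-embeddings", split="train")

row = ds[0]
print(row["identifier"])      # e.g. "efficientloftr/modeling_efficientloftr.py:repeat_kv"
print(len(row["embedding"]))  # fixed-width float vector (~2.56k dims per the stats above)
print(row["tokens"][:8])      # sorted, deduplicated tokens from that definition
```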
efficientloftr/modeling_efficientloftr.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
efficientloftr/modeling_efficientloftr.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRAttention
[ -0.00007860398181946948, 0.03507406264543533, 0.03125188872218132, -0.0029509428422898054, -0.0004812847182620317, 0.03777207061648369, 0.04451708123087883, -0.00736330496147275, 0.0031476723961532116, 0.00792538933455944, 0.015513528138399124, 0.025068962946534157, -0.0019110868452116847, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelAttention", "Module", "None", "Tensor", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "batch_size", "class", "config", "c...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRMLP
[ -0.00024143498740158975, 0.03365067020058632, 0.039373572915792465, 0.03319283947348595, -0.0008441282552666962, 0.05356637388467789, 0.018656665459275246, -0.024494025856256485, 0.003977417945861816, 0.0019457871094346046, 0.03960249200463295, -0.01808437518775463, 0.0013806504430249333, ...
[ "ACT2FN", "LayerNorm", "Linear", "ModelMLP", "Module", "__init__", "activation", "class", "config", "def", "fc1", "fc2", "forward", "hidden_size", "hidden_states", "intermediate_size", "layer_norm", "mlp_activation_function", "nn", "return", "self", "super" ]
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRAggregatedAttention
[ -0.00031153642339631915, 0.013750572688877583, 0.020511271432042122, 0.0022487915121018887, -0.0012819544645026326, 0.046981122344732285, 0.002664173487573862, -0.026469852775335312, 0.0009238666389137506, 0, 0.03391807898879051, 0.02532397210597992, -0.0004905803361907601, 0.0004941612132...
[ "False", "ModelAggregatedAttention", "ModelAggregationLayer", "ModelAttention", "ModelMLP", "Module", "None", "Tensor", "_", "__init__", "aggregated_encoder_hidden_states", "aggregated_h", "aggregated_hidden_states", "aggregated_w", "aggregation", "align_corners", "attention", "att...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRLocalFeatureTransformerLayer
[ -0.00025541221839375794, 0.029287267476320267, 0.029741333797574043, -0.014189567416906357, -0.0010358383879065514, 0.03927672281861305, 0.018162645399570465, -0.03087649866938591, 0.008059673942625523, 0.01589231565594673, 0.0018730228766798973, 0.021114075556397438, 0.0038028040435165167, ...
[ "GradientCheckpointingLayer", "ModelAggregatedAttention", "ModelLocalFeatureTransformerLayer", "None", "Tensor", "_", "__init__", "batch_size", "class", "config", "cross_attention", "def", "dim", "embed_dim", "features_0", "features_1", "forward", "height", "hidden_states", "kw...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRLocalFeatureTransformer
[ -0.0001857274182839319, 0.03282953426241875, 0.02773529477417469, 0.0017334559233859181, -0.0006969200330786407, 0.051395200192928314, 0.031471069902181625, -0.03418799489736557, 0.009113025851547718, -0.01398085243999958, 0.011603541672229767, 0.011490336619317532, 0.004245198331773281, -...
[ "ModelLocalFeatureTransformer", "ModelLocalFeatureTransformerLayer", "Module", "ModuleList", "None", "Tensor", "__init__", "class", "config", "def", "for", "forward", "hidden_states", "i", "in", "kwargs", "layer", "layer_idx", "layers", "nn", "num_attention_layers", "positi...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTROutConvBlock
[ -0.00018271109729539603, 0.0031646271236240864, 0.027133485302329063, -0.014361446723341942, -0.0005853850743733346, 0.050861094146966934, 0.01316939014941454, -0.05335873365402222, 0.008060574531555176, -0.0006492452812381089, 0.027814660221338272, -0.0077199870720505714, 0.0024692607112228...
[ "ACT2CLS", "BatchNorm2d", "Conv2d", "False", "ModelOutConvBlock", "Module", "__init__", "activation", "align_corners", "batch_norm", "bilinear", "class", "config", "def", "forward", "functional", "hidden_size", "hidden_states", "intermediate_size", "interpolate", "kernel_size...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRFineFusionLayer
[ -0.00024603219935670495, 0.016652308404445648, 0.010025369934737682, 0.020730426535010338, -0.0006301256362348795, 0.010648415423929691, 0.026281194761395454, -0.06162487342953682, 0.004191397689282894, 0.005805652122944593, -0.004248037934303284, -0.007816390134394169, 0.0032426691614091396...
[ "Conv2d", "False", "ModelFineFusionLayer", "ModelOutConvBlock", "Module", "ModuleList", "_", "__init__", "align_corners", "append", "batch_size", "bilinear", "class", "coarse_features", "coarse_height", "coarse_width", "config", "def", "embed_dim", "enumerate", "fine_embed_di...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRPreTrainedModel
[ -0.00022131213336251676, 0.027076518163084984, -0.0027161843609064817, 0.026848984882235527, -0.0006932669202797115, 0.04345894977450371, 0.02207077480852604, -0.01604112982749939, 0.002559754764661193, 0.024346113204956055, 0.010239019058644772, -0.004607558716088533, -0.0023891045711934566...
[ "BatchNorm2d", "Conv1d", "Conv2d", "LayerNorm", "Linear", "Model", "ModelAttention", "ModelConfig", "ModelPreTrainedModel", "ModelRepVGGBlock", "ModelRotaryEmbedding", "None", "PreTrainedModel", "ROPE_INIT_FUNCTIONS", "True", "_", "_can_record_outputs", "_init_weights", "_support...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRModel
[ -0.000247707444941625, 0.011437006294727325, 0.0036519153509289026, 0.014834136702120304, -0.0007077355403453112, 0.05435409024357796, 0.026271142065525055, -0.028649134561419487, 0.0026044666301459074, 0.03374483063817024, 0.004925839137285948, 0.005860050208866596, 0.0013163881376385689, ...
[ "BackboneOutput", "Input", "ModelLocalFeatureTransformer", "ModelModel", "ModelPreTrainedModel", "ModelRotaryEmbedding", "ModelepVGG", "None", "ValueError", "_", "__init__", "a", "auto_docstring", "backbone", "batch_size", "be", "channels", "check_model_inputs", "class", "coars...
efficientloftr/modeling_efficientloftr.py:mask_border
[ 0, -0.0320143960416317, 0.03652346879243851, -0.027279874309897423, 0.00018141961481887847, 0.046217966824769974, 0.007045421749353409, -0.058167003095149994, 0.014429024420678616, 0.019276274368166924, 0.0017261283937841654, -0.03877800330519676, 0.003663619514554739, 0.003325439058244228...
[ "Model_border", "border_margin", "def", "if", "return", "tensor", "value" ]
efficientloftr/modeling_efficientloftr.py:create_meshgrid
[ -0.00016686093294993043, -0.009273955598473549, -0.0021077170968055725, -0.002838392276316881, -0.0006217765621840954, 0.029901480302214622, 0.033498648554086685, -0.0537327341735363, 0.011522186920046806, 0.059353314340114594, 0.0035971705801784992, -0.012533891014754772, -0.003442604560405...
[ "False", "Model_meshgrid", "None", "def", "device", "dim", "dtype", "grid", "height", "if", "ij", "indexing", "linspace", "meshgrid", "normalized_coordinates", "permute", "return", "stack", "torch", "unsqueeze", "width", "xs", "ys" ]
efficientloftr/modeling_efficientloftr.py:spatial_expectation2d
[ -0.0003348449245095253, 0.02200615406036377, 0.012558485381305218, 0.01624538004398346, -0.001577011658810079, 0.026614772155880928, 0.020277921110391617, -0.004925461020320654, 0.003024405799806118, 0.024656109511852264, 0.018895335495471954, 0.01065743062645197, -0.0019586628768593073, 0...
[ "Model_expectation2d", "True", "batch_size", "cat", "create_meshgrid", "def", "device", "dtype", "embed_dim", "expected_x", "expected_y", "grid", "height", "input", "input_flat", "keepdim", "normalized_coordinates", "output", "pos_x", "pos_y", "r", "reshape", "return", ...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRForKeypointMatching
[ -0.00008713916031410918, 0.01622372679412365, 0.013181778602302074, -0.021180978044867516, -0.00009330051398137584, 0.037855364382267, 0.04979782924056053, -0.04506590962409973, 0.012956448830664158, 0.0128437839448452, 0.024786250665783882, 0.01994166523218155, 0.000045990112994331867, -0...
[ "False", "Model", "ModelFineFusionLayer", "ModelForKeypointMatching", "ModelKeypointMatchingOutput", "ModelModel", "ModelPreTrainedModel", "None", "True", "_", "__init__", "_coarse_matching", "_fine_matching", "_get_first_stage_fine_matching", "_get_matches_from_scores", "_get_second_s...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormEmbeddings
[ -0.00024802194093354046, 0.0058108000084757805, 0.0058108000084757805, -0.012812105007469654, -0.001112555619329214, 0.02766507677733898, 0.025284064933657646, -0.014739589765667915, 0.0030896447133272886, -0.01411599200218916, 0.02517068386077881, 0.026304498314857483, -0.000083707403973676...
[ "Dropout", "Embedding", "False", "LayerNorm", "ModelEmbeddings", "Module", "None", "__init__", "arange", "batch_size", "buffered_token_type_ids", "class", "config", "create_position_ids_from_input_ids", "create_position_ids_from_inputs_embeds", "cumsum", "def", "device", "dim", ...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:eager_attention_forward
[ 0.00002095530362566933, 0.025862814858555794, 0.0269921962171793, -0.01146321278065443, 0.00008955634984886274, 0.03704368323087692, 0.060083046555519104, -0.023942869156599045, 0.021458230912685394, 0.014569009654223919, 0.023152301087975502, 0.026879258453845978, 0.003063444746658206, -0...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "shape", "size", "softmax", "tor...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormSelfAttention
[ -0.00004590151002048515, 0.03693104162812233, 0.04006458446383476, -0.006714735180139542, -0.00029376966995187104, 0.008001726120710373, 0.023389659821987152, -0.027866151183843613, 0.0027698283083736897, 0.030216308310627937, 0.010239970870316029, 0.007945769466459751, -0.000088742919615469...
[ "ALL_ATTENTION_FUNCTIONS", "Dropout", "EncoderDecoderCache", "False", "Linear", "ModelSelfAttention", "Module", "None", "The", "ValueError", "__init__", "_attn_implementation", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_interface", "attention_mask...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormCrossAttention
[ -0.00026539957616478205, 0.03879137337207794, 0.05462927371263504, -0.0018291050801053643, -0.0012409222545102239, 0.020658127963542938, 0.029839517548680305, -0.03190533071756363, -0.0006957772420719266, 0.022953474894165993, 0.007689414545893669, 0.013026097789406776, -0.000245673931203782...
[ "ALL_ATTENTION_FUNCTIONS", "Dropout", "False", "Linear", "ModelCrossAttention", "Module", "None", "The", "True", "ValueError", "__init__", "_attn_implementation", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_interface", "attention_mask", "attentio...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormSelfOutput
[ -0.00009031750960275531, 0.04398683086037636, 0.04398683086037636, 0.00541376369073987, -0.0006731958710588515, 0.05549107864499092, 0.026730459183454514, -0.019173746928572655, 0.0022698333486914635, 0.006626221351325512, 0.011109494604170322, -0.002904258668422699, 0.0032849139533936977, ...
[ "Dropout", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "nn", "return", "self", "super" ]
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormAttention
[ 0.00002212798790424131, 0.03948509320616722, 0.03611988574266434, 0.005664764903485775, 0.00009464644244872034, 0.039933785796165466, 0.032979026436805725, -0.015928644686937332, 0.010824748314917088, 0.01177822332829237, 0.019406026229262352, 0.037017274647951126, 0.0016966250259429216, -...
[ "False", "LayerNorm", "ModelAttention", "ModelCrossAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "__init__", "attention_class", "attention_mask", "attention_output", "attn_weights", "cache_position", "class", "config", "def", "else", "encoder_attention_ma...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormIntermediate
[ -0.00024060123541858047, 0.028857890516519547, 0.03718447685241699, 0.029200078919529915, -0.0009338892414234579, 0.0410626120865345, 0.031025083735585213, -0.014257851056754589, 0.0015327190048992634, 0.007699239533394575, 0.022356310859322548, -0.007984397001564503, -0.0008412132156081498,...
[ "ACT2FN", "LayerNorm", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "eps", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "layer_norm_eps", "nn", "...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormOutput
[ -0.0002648788213264197, 0.03665350377559662, 0.05406391620635986, 0.016150448471307755, -0.0012742818798869848, 0.0490240603685379, 0.04421328753232956, -0.02405386045575142, -0.0004617482190951705, 0.00538348313421011, 0.008533393032848835, -0.002505610464140773, 0.0014532541390508413, 0....
[ "Dropout", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "nn", "return", "self", "super" ]
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormLayer
[ -0.00008142412843881175, 0.023534199222922325, 0.0189394261687994, 0.003950383514165878, -0.00009981021867133677, 0.03832712396979332, 0.02062044106423855, -0.013728282414376736, 0.003502112813293934, 0.005435279104858637, 0.0047628735192120075, 0.010814524255692959, 0.002661605831235647, ...
[ "False", "GradientCheckpointingLayer", "If", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "True", "ValueError", "_", "__init__", "add_cross_attention", "and", "apply_chunking_to_forward", "are", "attention", "attention_mask", "attention_output", "...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormEncoder
[ -0.000011508945135574322, 0.014999293722212315, 0.0157887302339077, 0.01770593412220478, -0.00019647806766442955, 0.04894506558775902, 0.020976455882191658, -0.03699073940515518, 0.011785159818828106, 0.012067100964486599, 0.010995723307132721, 0.0013674168149009347, -0.0004546308482531458, ...
[ "BaseModelOutputWithPastAndCrossAttentions", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "None", "__init__", "attention_mask", "cache_position", "class", "config", "def", "else", "encoder_attention_mask", "encoder_hidden_states", "enumerate", "for", "forward", "hidden_s...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormPooler
[ -0.0002915378427132964, 0.011618588119745255, 0.03685896843671799, 0.022893769666552544, -0.001151842763647437, 0.03296703100204468, 0.037087906152009964, -0.0066105760633945465, -0.0011303798528388143, 0.0015954095870256424, 0.014022434130311012, -0.016025640070438385, -0.001244848710484802...
[ "Linear", "ModelPooler", "Module", "Tanh", "__init__", "activation", "class", "config", "def", "dense", "first_token_tensor", "forward", "hidden_size", "hidden_states", "nn", "pooled_output", "return", "self", "super" ]
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormPreTrainedModel
[ -0.0002050270704785362, 0.02918454259634018, -0.002432045293971896, -0.001329141086898744, -0.0009756460785865784, 0.03031572699546814, 0.02861895225942135, -0.013461087830364704, 0.0013786302879452705, -0.005260005127638578, 0.0021916686091572046, 0.016062811017036438, -0.000738804461434483...
[ "Model", "ModelConfig", "ModelCrossAttention", "ModelEmbeddings", "ModelLMHead", "ModelLayer", "ModelPreTrainedModel", "ModelSelfAttention", "PreTrainedModel", "True", "_can_record_outputs", "_init_weights", "_no_split_modules", "_supports_attention_backend", "_supports_flash_attn", "_...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormModel
[ 0.000011037807780667208, 0.025626948103308678, 0.014212324284017086, 0.01264561153948307, 0, 0.043644145131111145, 0.003804874373599887, -0.006490667816251516, 0.010687220841646194, 0.006098989862948656, 0.015667129307985306, -0.005259679164737463, 0.0013708738842979074, 0.0031614028848707...
[ "BaseModelOutputWithPoolingAndCrossAttentions", "DynamicCache", "EncoderDecoderCache", "False", "LayerNorm", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelPooler", "ModelPreTrainedModel", "None", "True", "ValueError", "You", "__init__", "_create_attention_masks", "add_poolin...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormForCausalLM
[ -0.0002728815597947687, 0.038557812571525574, 0.01281480211764574, -0.00816518347710371, -0.0011127898469567299, 0.02619663067162037, 0.02517598308622837, -0.017918042838573456, -0.002735903486609459, 0.011340533383190632, 0.02200063318014145, 0.010149776935577393, -0.00022503870422951877, ...
[ "CausalLMOutputWithCrossAttentions", "False", "GenerationMixin", "If", "Model", "ModelForCausalLM", "ModelLMHead", "ModelLMHeadModel", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_tied_weights_keys", "a", "add", "add_pooling_layer", "as", "attention_mask", ...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormForMaskedLM
[ -0.00006239726644707844, 0.03926867991685867, 0.019301554188132286, -0.02085455134510994, -0.0002235901920357719, 0.03793753683567047, 0.012867702171206474, -0.019856195896863937, 0.007154886145144701, 0.0036883715074509382, 0.019412482157349586, 0.03705010935664177, -0.00048531204811297357,...
[ "CrossEntropyLoss", "False", "If", "MaskedLMOutput", "Model", "ModelForMaskedLM", "ModelLMHead", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_tied_weights_keys", "add_pooling_layer", "attention", "attention_mask", "attentions", "auto_docstring", "bi", "bi...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormLMHead
[ -0.00016041471099015325, 0.04488105699419975, 0.013015505857765675, 0.033211980015039444, -0.0006381525308825076, 0.017167003825306892, 0.05924299359321594, -0.01739140972495079, 0.002791040576994419, 0.02176731266081333, 0.014586343429982662, 0.015035153366625309, 0.001016836380586028, 0....
[ "LayerNorm", "Linear", "ModelLMHead", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "dense", "eps", "features", "forward", "gelu", "hidden_size", "kwargs", "layer_norm", "layer_norm_eps", "nn", "return", "self", "super", "torch", "v...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormForSequenceClassification
[ -0.0003315486537758261, 0.026010526344180107, 0.0015900075668469071, 0.014602401293814182, -0.0010909020202234387, 0.029889289289712906, 0.01985013857483864, 0.011122922413051128, -0.002566828392446041, 0.012092613615095615, 0.05065207928419113, 0.0031800151336938143, 0.00012032008089590818,...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "False", "MSELoss", "Model", "ModelClassificationHead", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "None", "SequenceClassifierOutput", "True", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions",...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormForMultipleChoice
[ -0.00027658778708428144, 0.05024975538253784, 0.015645945444703102, 0.025810102000832558, -0.0009778715902939439, 0.04499636963009834, 0.043854329735040665, -0.00748036103323102, 0.00046038482105359435, 0.014618109911680222, 0.038600947707891464, -0.008222687058150768, -0.002198426751419902,...
[ "CrossEntropyLoss", "Dropout", "Linear", "Model", "ModelForMultipleChoice", "ModelModel", "ModelPreTrainedModel", "MultipleChoiceModelOutput", "None", "True", "__init__", "attention_mask", "attentions", "auto_docstring", "can_return_tuple", "class", "classifier", "config", "def",...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormForTokenClassification
[ -0.0002524004958104342, 0.032534778118133545, 0.007394268177449703, -0.027529429644346237, -0.000988272367976606, 0.042317964136600494, 0.03731261566281319, 0.015812357887625694, -0.00042303744703531265, 0.010750127956271172, 0.049825992435216904, 0.0179737601429224, -0.0009171736310236156, ...
[ "CrossEntropyLoss", "Dropout", "False", "Linear", "Model", "ModelForTokenClassification", "ModelModel", "ModelPreTrainedModel", "None", "TokenClassifierOutput", "True", "__init__", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "can_return_tuple", "class", ...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormClassificationHead
[ -0.0003826709871646017, 0.027710409834980965, 0.03449428454041481, 0.010808208957314491, -0.001171368407085538, 0.010865699499845505, 0.05542081966996193, 0.0050016711466014385, -0.007703723851591349, 0.007761214394122362, 0.020236646756529808, 0.003262584563344717, -0.000524600618518889, ...
[ "Dropout", "Linear", "ModelClassificationHead", "Module", "None", "__init__", "class", "classifier_dropout", "config", "def", "dense", "dropout", "else", "features", "forward", "hidden_dropout_prob", "hidden_size", "if", "is", "kwargs", "nn", "not", "num_labels", "out_p...
roberta_prelayernorm/modeling_roberta_prelayernorm.py:RobertaPreLayerNormForQuestionAnswering
[ -0.00020363238581921905, 0.025275100022554398, 0.01444291416555643, 0.014668584801256657, -0.0007757424609735608, 0.05258123204112053, 0.04084636643528938, 0.023131228983402252, 0.006008477881550789, 0.03159387409687042, 0.020084677264094353, 0.019069159403443336, 0.0009943607728928328, -0...
[ "CrossEntropyLoss", "False", "Linear", "Model", "ModelForQuestionAnswering", "ModelModel", "ModelPreTrainedModel", "None", "QuestionAnsweringModelOutput", "True", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "can_return_tuple", "clamp", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:BaseModelOutputWithDeepstackFeatures
[ -0.000323236221447587, 0.014142042025923729, 0.015544557943940163, 0.02173900604248047, -0.001380602247081697, 0.05095810070633888, 0.0631132423877716, -0.030621610581874847, 0.00876572821289301, -0.011629199609160423, 0.031322870403528214, 0.01601206324994564, -0.0004565483541227877, 0.01...
[ "ModelModelOutputWithDeepstackFeatures", "ModelModelOutputWithPooling", "None", "class", "deepstack_features", "r" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:SinusoidsPositionEmbedding
[ -0.0002440618263790384, 0.011971499770879745, 0.020066512748599052, 0.01892637088894844, -0.0009976249421015382, 0.016190027818083763, 0.04172922670841217, 0, 0.004674585536122322, 0.008779099211096764, 0.020294541493058205, -0.001617577625438571, -0.002209026599302888, 0.0246270839124918,...
[ "False", "ModelPositionEmbedding", "Module", "ValueError", "__init__", "arange", "cat", "channels", "class", "cos", "def", "dim", "even", "exp", "float", "forward", "if", "input", "inv_timescales", "length", "log", "log_timescale_increment", "max_timescale", "needs", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoePreTrainedModel
[ -0.0003162299399264157, 0.05197669938206673, -0.001372725353576243, 0.014316591434180737, -0.0012002362636849284, 0.03748762235045433, 0.03219795599579811, -0.02380348928272724, -0.005174671765416861, 0.0031766733154654503, 0.01345414575189352, -0.007187043782323599, -0.005749634932726622, ...
[ "False", "ModelCode2Wav", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "ModelThinkerTextSparseMoeBlock", "ModelVisionBlock", "ModelVisionRotaryEmbedding", "PreTrainedModel", "SinusoidsPositionEmbedding", "True", "_can_compile_fullgraph", "_init_weights", "_no_split_modules",...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:_get_feat_extract_output_lengths
[ -0.0001385386858601123, 0.009770065546035767, 0.02338038757443428, 0.0013624441344290972, -0.0005470954347401857, 0.01762000285089016, 0.048342056572437286, -0.056474365293979645, 0.016038719564676285, -0.013610322028398514, 0.030722055584192276, 0.003388462122529745, 0.0003917909343726933, ...
[ "_get_feat_extract_output_lengths", "def", "feat_lengths", "input_lengths", "input_lengths_leave", "output_lengths", "return" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoePreTrainedModelForConditionalGeneration
[ -0.0001939640787895769, 0.03385554999113083, -0.009761682711541653, -0.0012766362633556128, -0.0005501526757143438, 0.04333510249853134, 0.018169144168496132, -0.04830058291554451, 0.001819735742174089, 0.018281996250152588, 0.043560806661844254, 0.03362984582781792, -0.00029094613273628056,...
[ "False", "ModelPreTrainedModel", "ModelPreTrainedModelForConditionalGeneration", "None", "Tensor", "True", "_", "_get_feat_extract_output_lengths", "_iter", "_llm_pos_ids", "_prepare_4d_causal_attention_mask_with_cache_position", "and", "append", "arange", "argwhere", "attention_mask",...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeAudioAttention
[ -0.00011531353811733425, 0.04439307004213333, 0.03380183130502701, 0.0044787428341805935, -0.00039259420009329915, 0.010591240599751472, 0.027604829519987106, -0.03177371993660927, -0.0013943255180492997, 0.013070041313767433, 0.03177371993660927, 0.027266809716820717, -0.003605528734624386,...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelAudioAttention", "Module", "None", "ValueError", "_", "__init__", "_attn_implementation", "and", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "be", "by", "class", "config", "contiguous", "cu_s...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeAudioEncoderLayer
[ -0.0001686819305177778, 0.04453909397125244, 0.03255649656057358, 0.014413031749427319, -0.0005793473683297634, 0.02306085079908371, 0.028147803619503975, -0.029617367312312126, 0.006132603622972965, 0.010060861706733704, 0.029617367312312126, 0.027130411937832832, 0.001257607713341713, -0...
[ "ACT2FN", "GradientCheckpointingLayer", "LayerNorm", "Linear", "ModelAudioAttention", "ModelAudioEncoderLayer", "None", "__init__", "activation_dropout", "activation_fn", "activation_function", "attention_mask", "clamp", "clamp_value", "class", "config", "cu_seqlens", "d_model", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeAudioEncoder
[ -0.000045726887037744746, 0.04099929705262184, 0.015234711579978466, 0.01176209282130003, -0.00009757986845215783, 0.027220845222473145, 0.01557077094912529, -0.05376956984400749, 0.008289474993944168, -0.01803520880639553, 0.02330014668405056, 0.01557077094912529, 0, -0.01433855202049017,...
[ "ACT2FN", "BaseModelOutputWithPooling", "Conv2d", "F", "False", "LayerNorm", "Linear", "ModelAudioAttention", "ModelAudioEncoder", "ModelAudioEncoderConfig", "ModelAudioEncoderLayer", "ModelPreTrainedModel", "ModuleList", "None", "SinusoidsPositionEmbedding", "T", "True", "_", "_...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:apply_rotary_pos_emb_vision
[ -0.00006463831959990785, 0.03513491153717041, 0.018587501719594002, -0.0022809358779340982, -0.0004639792023226619, 0.01700076460838318, 0.060749396681785583, -0.009180412627756596, 0.016887426376342773, 0.04692211002111435, 0.009010405279695988, 0.01983422413468361, 0.0013458938337862492, ...
[ "Model_rotary_pos_emb_vision", "cos", "def", "dtype", "float", "k", "k_embed", "orig_q_dtype", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeVisionAttention
[ -0.00014312515850178897, 0.03664004057645798, 0.021825702860951424, 0.021825702860951424, -0.0004081717343069613, 0.0252182986587286, 0.03958028927445412, -0.029628673568367958, 0.0055129691027104855, 0.03189040347933769, 0.028837068006396294, 0.03189040347933769, 0.0010036430321633816, -0...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelVisionAttention", "Module", "None", "Tensor", "_", "__init__", "_attn_implementation", "apply_rotary_pos_emb_vision", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_outputs", "cat", "class", "...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeVisionPatchMerger
[ -0.000262983376160264, 0.02479151450097561, 0.009097803384065628, 0.03206975758075714, -0.0008777959155850112, 0.02877180464565754, 0.015238821506500244, -0.05140259116888046, 0.003440106986090541, 0.010803641751408577, 0.016489768400788307, 0.0024876806419342756, -0.0010235029039904475, -...
[ "False", "GELU", "LayerNorm", "Linear", "ModelVisionPatchMerger", "Module", "ModuleList", "__init__", "class", "config", "def", "else", "eps", "for", "forward", "hidden", "hidden_size", "if", "in", "layer", "ln_q", "mlp", "nn", "out_hidden_size", "return", "self", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeVisionRotaryEmbedding
[ -0.00022179921506904066, 0.03434310480952263, 0.01224904041737318, 0.008013390935957432, -0.0010374479461461306, 0.04464603587985039, 0.0668545737862587, -0.016713643446564674, 0.005323181394487619, 0.0345720574259758, 0.00887196883559227, -0.00875749159604311, -0.0022466115187853575, 0.00...
[ "False", "ModelVisionRotaryEmbedding", "Module", "Tensor", "__init__", "arange", "class", "def", "device", "dim", "dtype", "float", "forward", "freqs", "inv_freq", "nn", "outer", "persistent", "register_buffer", "return", "self", "seq", "seqlen", "super", "theta", "...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTextTopKRouter
[ -0.0003398950502742082, 0.04046578332781792, 0.006490898784250021, -0.01017492264509201, -0.0015496290288865566, 0.055669691413640976, 0.07578562945127487, -0.027250079438090324, -0.003186972811818123, 0.0160225797444582, 0.02409234456717968, -0.01777687668800354, -0.0019151075975969434, 0...
[ "F", "ModelTextTopKRouter", "Module", "Parameter", "True", "__init__", "class", "config", "def", "dim", "dtype", "float", "forward", "functional", "hidden_dim", "hidden_size", "hidden_states", "keepdim", "linear", "nn", "num_experts", "num_experts_per_tok", "reshape", "...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeVisionMLP
[ -0.00020412541925907135, 0.02817285805940628, 0.032262466847896576, 0.033852867782115936, -0.00048280099872499704, 0.04589449241757393, 0.025673652067780495, -0.030444862321019173, 0.0004544009279925376, 0.005850411951541901, 0.04589449241757393, -0.022152045741677284, 0.0011999025009572506,...
[ "ACT2FN", "Linear", "ModelVisionMLP", "Module", "__init__", "act_fn", "class", "config", "def", "forward", "hidden_act", "hidden_size", "hidden_state", "intermediate_size", "linear_fc1", "linear_fc2", "nn", "return", "self", "super" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeVisionBlock
[ -0.00016528498963452876, 0.013067236170172691, 0.01606534607708454, 0.01855434477329254, -0.00040481562609784305, 0.043896861374378204, 0.026587016880512238, -0.01866747997701168, 0.005232551135122776, 0.021043341606855392, 0.021043341606855392, 0.019120024517178535, 0.0016475465381518006, ...
[ "GradientCheckpointingLayer", "LayerNorm", "ModelVisionAttention", "ModelVisionBlock", "ModelVisionMLP", "None", "Tensor", "__init__", "attn", "attn_implementation", "class", "config", "cu_seqlens", "def", "eps", "forward", "hidden_size", "hidden_states", "kwargs", "mlp", "nn...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeVisionPatchEmbed
[ -0.00006803657743148506, 0.010169493034482002, 0.01101226918399334, 0.00854012742638588, -0.00019313610391691327, 0.008989606983959675, 0.027081191539764404, -0.03191310539841652, 0.013877706602215767, -0.0005021538236178458, 0.008483941666781902, -0.00022386229829862714, -0.0015661581419408...
[ "Conv3d", "ModelVisionPatchEmbed", "Module", "__init__", "class", "config", "def", "dtype", "embed_dim", "forward", "hidden_size", "hidden_states", "in_channels", "kernel_size", "nn", "patch_size", "proj", "return", "self", "stride", "super", "target_dtype", "temporal_pat...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeVisionEncoder
[ -0.00021391107293311507, 0.014595452696084976, -0.014934881590306759, 0.03032233566045761, -0.0008096799138002098, 0.0432206429541111, 0.03665834665298462, -0.06064467132091522, 0, 0.022628609091043472, 0.013011449947953224, 0.013181164860725403, -0.000502072274684906, 0.005091437138617039...
[ "BaseModelOutputWithDeepstackFeatures", "Embedding", "F", "False", "ModelPreTrainedModel", "ModelTextTopKRouter", "ModelVisionAttention", "ModelVisionBlock", "ModelVisionEncoder", "ModelVisionEncoderConfig", "ModelVisionPatchEmbed", "ModelVisionPatchMerger", "ModelVisionRotaryEmbedding", "...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextRotaryEmbedding
[ -0.0003272967296652496, 0.04515239968895912, 0.0033456997480243444, -0.01594298705458641, -0.0013964660465717316, 0.05911706015467644, 0.02653285302221775, -0.013150054961442947, -0.010880797170102596, 0.022692572325468063, 0.01640847511589527, -0.0014982916181907058, -0.0012946403585374355,...
[ "False", "ModelThinkerTextRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "apply_interleaved_mrope", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextExperts
[ -0.0004163854173384607, 0.04810909554362297, 0.0009580549085512757, -0.0062494659796357155, -0.00162132375407964, 0.05730642378330231, 0.06037219986319542, -0.016154279932379723, -0.005630414932966232, -0.023111233487725258, 0.02134251594543457, -0.011732487939298153, -0.0024025070015341043,...
[ "ACT2FN", "ModelThinkerTextExperts", "Module", "None", "Parameter", "__init__", "act_fn", "chunk", "class", "config", "continue", "current_hidden_states", "current_state", "def", "dim", "down_proj", "dtype", "empty", "expert_hit", "expert_idx", "expert_mask", "final_hidden_...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextTopKRouter
[ -0.0003927720244973898, 0.045554179698228836, 0.011447552591562271, -0.004484608303755522, -0.0017259841552004218, 0.056647684425115585, 0.07505818456411362, -0.027851777151226997, -0.004514112137258053, 0.018174465745687485, 0.023249153047800064, -0.019118594005703926, -0.00210953620262444,...
[ "F", "ModelThinkerTextTopKRouter", "Module", "Parameter", "True", "__init__", "class", "config", "def", "dim", "dtype", "float", "forward", "functional", "hidden_dim", "hidden_size", "hidden_states", "if", "keepdim", "linear", "nn", "norm_topk_prob", "num_experts", "num...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextSparseMoeBlock
[ -0.0003717890358529985, 0.024027777835726738, 0.020645227283239365, -0.002784772776067257, -0.0013340666191652417, 0.04712243750691414, 0.029859762638807297, -0.04385652765631676, -0.006502663251012564, -0.015396440401673317, 0.019712109118700027, -0.02519417554140091, -0.0002934217627625912...
[ "ModelThinkerTextExperts", "ModelThinkerTextSparseMoeBlock", "ModelThinkerTextTopKRouter", "Module", "_", "__init__", "batch_size", "class", "config", "def", "experts", "final_hidden_states", "forward", "gate", "hidden_dim", "hidden_states", "hidden_states_reshaped", "nn", "resha...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextRMSNorm
[ -0.00013339596625883132, 0.06241871044039726, 0.037089377641677856, 0.051110975444316864, -0.0006643295637331903, 0.03392321243882179, 0.020014695823192596, -0.02318086288869381, 0.005795215722173452, 0.03731553256511688, 0.022502398118376732, 0.007971955463290215, 0.0029117425438016653, 0...
[ "ModelThinkerTextRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextAttention
[ -0.00016233831411227584, 0.04901205375790596, 0.03410515934228897, 0.003176184371113777, -0.000695231428835541, 0.026087060570716858, 0.03681550547480583, -0.015132753178477287, -0.002795042237266898, 0.012648271396756172, 0.019988786429166794, 0.03274998813867569, -0.001432811957783997, -...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelThinkerTextAttention", "ModelThinkerTextRMSNorm", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "c...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextMLP
[ -0.0002661260368768126, 0.037516575306653976, 0.03590543568134308, 0.02048451080918312, -0.0010429263347759843, 0.05362799018621445, 0.026583831757307053, -0.010932744480669498, -0.0051498981192708015, -0.010702582076191902, 0.032452989369630814, -0.03360380604863167, -0.002905808389186859, ...
[ "ACT2FN", "Linear", "ModelThinkerTextMLP", "Module", "None", "__init__", "act_fn", "class", "config", "def", "down_proj", "else", "forward", "gate_proj", "hidden_act", "hidden_size", "if", "intermediate_size", "is", "nn", "return", "self", "super", "up_proj", "x" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextDecoderLayer
[ -0.0003210666181985289, 0.04936175048351288, 0.024336490780115128, -0.003042061347514391, -0.0011192490346729755, 0.0410965271294117, 0.036045558750629425, -0.04821380227804184, 0.0011981703573837876, -0.014923320151865482, 0.014808525331318378, 0.019859494641423225, -0.0033146990463137627, ...
[ "False", "GradientCheckpointingLayer", "ModelThinkerTextAttention", "ModelThinkerTextDecoderLayer", "ModelThinkerTextMLP", "ModelThinkerTextRMSNorm", "ModelThinkerTextSparseMoeBlock", "None", "Tensor", "_", "__init__", "and", "attention_mask", "cache_position", "class", "config", "de...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextPreTrainedModel
[ -0.000348669447703287, 0.05392754077911377, 0.00894918292760849, 0.005288153421133757, -0.0014382614754140377, 0.05276530981063843, 0.047419045120477676, -0.019757935777306557, -0.01005330216139555, -0.006450384855270386, 0.013365662656724453, -0.005288153421133757, -0.0025133255403488874, ...
[ "ModelTextConfig", "ModelThinkerTextAttention", "ModelThinkerTextDecoderLayer", "ModelThinkerTextExperts", "ModelThinkerTextPreTrainedModel", "ModelThinkerTextTopKRouter", "OutputRecorder", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_no_spli...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTextRMSNorm
[ -0.00010017822205554694, 0.046998150646686554, 0.0384119488298893, 0.05196910724043846, -0.0005719426553696394, 0.03389289602637291, 0.022934192791581154, -0.03344099223613739, 0.007625902071595192, 0.036152422428131104, 0.02191740646958351, 0.012822812423110008, 0.002414868911728263, 0.02...
[ "ModelTextRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tupl...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerTextModel
[ -0.00018215877935290337, 0.05161283537745476, 0.01290320884436369, 0.00894169695675373, -0.0009762296103872359, 0.04029422998428345, 0.04482167214155197, -0.03667227551341057, 0.0004562812391668558, 0.0008099875994957983, 0.026032788679003716, 0.014261441305279732, -0.0020232005044817924, ...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelPreTrainedModel", "ModelTextConfig", "ModelTextRMSNorm", "ModelThinkerTextAttention", "ModelThinkerTextDecoderLayer", "ModelThinkerTextModel", "ModelThinkerTextRotaryEmbedding", "ModelThinkerTextSparseMoeBlock", "ModuleLi...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerCausalLMOutputWithPast
[ -0.00032916050986386836, 0.030663128942251205, 0.006319881416857243, -0.00150682358071208, -0.001558026415295899, 0.06741207093000412, 0.054772306233644485, -0.020949237048625946, 0.008660578168928623, 0.010357583872973919, 0.013927146792411804, 0.012230141088366508, -0.0019457043381407857, ...
[ "ModelThinkerCausalLMOutputWithPast", "MoeCausalLMOutputWithPast", "None", "class", "r", "rope_deltas" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:load_balancing_loss_func
[ -0.00027690583374351263, 0.01840798556804657, 0.001643570140004158, -0.028812499716877937, -0.0009432663209736347, 0.05808233842253685, 0.038874007761478424, -0.011547867208719254, 0, -0.02252405695617199, 0.031556546688079834, -0.0061455233953893185, -0.0008396499906666577, 0.014349081553...
[ "Model_balancing_loss_func", "None", "_", "attention_mask", "batch_size", "cat", "compute_device", "concatenated_gate_logits", "def", "device", "dim", "else", "expand", "expert_attention_mask", "expert_mask", "float", "for", "functional", "gate_logits", "if", "in", "is", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeThinkerForConditionalGeneration
[ -0.00028381531592458487, 0.05757702887058258, -0.010567212477326393, 0.01816418021917343, -0.0011138413101434708, 0.04501061141490936, 0.051408059895038605, -0.039527084678411484, -0.003327243961393833, 0.006368887610733509, 0.021362904459238052, -0.003998404834419489, -0.0010638611856848001...
[ "False", "GenerationMixin", "Image", "Linear", "ModelAudioEncoder", "ModelAudioEncoderLayer", "ModelPreTrainedModelForConditionalGeneration", "ModelThinkerCausalLMOutputWithPast", "ModelThinkerConfig", "ModelThinkerForConditionalGeneration", "ModelThinkerTextAttention", "ModelThinkerTextDecode...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerResizeMLP
[ -0.00029760412871837616, 0.037863850593566895, 0.04061758518218994, 0.012334437109529972, -0.0011760741472244263, 0.046125058084726334, 0.03396272659301758, -0.027766825631260872, -0.0055361539125442505, 0.00639669643715024, 0.0024955719709396362, -0.02145618200302124, -0.0004517846100497991...
[ "ACT2FN", "Linear", "ModelTalkerResizeMLP", "Module", "__init__", "act_fn", "class", "config", "def", "forward", "hidden_act", "hidden_size", "hidden_state", "intermediate_size", "linear_fc1", "linear_fc2", "nn", "return", "self", "super", "text_config", "thinker_hidden_siz...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerCodePredictorOutputWithPast
[ -0.0002754869346972555, 0.012215054593980312, -0.0025640090461820364, 0.01209981832653284, -0.0014044431736692786, 0.026158655062317848, 0.07375127077102661, -0.028002435341477394, 0.003975654486566782, 0.007893690839409828, 0.029500508680939674, 0.018783526495099068, -0.005070399958640337, ...
[ "CausalLMOutputWithPast", "ModelTalkerCodePredictorOutputWithPast", "None", "class", "generation_steps", "r" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeRMSNorm
[ -0.00009835156379267573, 0.04154934734106064, 0.0313878208398819, 0.048549506813287735, -0.00046573654981330037, 0.03861379250884056, 0.02461347170174122, -0.03003295138478279, 0.008129219524562359, 0.04358164966106415, 0.01964561454951763, 0.004770271480083466, 0.0023992490023374557, 0.01...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "dtype", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "input_dtype", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerCodePredictorAttention
[ -0.00019171090389136225, 0.03589964285492897, 0.016472935676574707, -0.0006922893808223307, -0.0009159520850516856, 0.01579129695892334, 0.0497596301138401, -0.015677690505981445, -0.0019313098164275289, 0.007782042492181063, 0.032945871353149414, 0.03362751379609108, -0.003294587368145585, ...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelRMSNorm", "ModelTalkerCodePredictorAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cach...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeMLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerCodePredictorDecoderLayer
[ -0.00021799292881041765, 0.031305909156799316, 0.013157556764781475, 0.0085637541487813, -0.0007869012770242989, 0.027335958555340767, 0.05217651650309563, -0.02631511352956295, 0.004593802150338888, -0.007316055241972208, 0.01622009091079235, 0.03198647499084473, -0.0033602812327444553, 0...
[ "False", "GradientCheckpointingLayer", "ModelMLP", "ModelRMSNorm", "ModelTalkerCodePredictorAttention", "ModelTalkerCodePredictorDecoderLayer", "None", "Tensor", "_", "__init__", "attention_mask", "attention_type", "cache_position", "class", "config", "def", "eps", "forward", "hi...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerCodePredictorModel
[ -0.00012675291509367526, 0.03042070008814335, -0.013914654031395912, -0.0018167918315157294, -0.000492928025778383, 0.01188660692423582, 0.06129207834601402, -0.019604451954364777, -0.0027885641902685165, -0.013745649717748165, 0.043039657175540924, 0.02974468469619751, -0.007041828706860542...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModelTalkerCodePredictorAttention", "ModelTalkerCodePredictorConfig", "ModelTalkerCodePredictorDecoderLayer", "ModelTalkerCodePredictorModel", "ModuleList", "None...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration
[ -0.00024011720961425453, 0.012180167250335217, -0.019237834960222244, 0.007626833859831095, -0.0006830000202171504, 0.016619667410850525, 0.055323004722595215, -0.012806250713765621, -0.006915375590324402, -0.009505083784461021, 0.05828266963362694, 0.004781000316143036, -0.00409800047054886...
[ "False", "GenerationMixin", "Linear", "ModelPreTrainedModel", "ModelTalkerCodePredictorAttention", "ModelTalkerCodePredictorConfig", "ModelTalkerCodePredictorDecoderLayer", "ModelTalkerCodePredictorModel", "ModelTalkerCodePredictorModelForConditionalGeneration", "ModelTalkerCodePredictorOutputWith...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerOutputWithPast
[ -0.0002896391961257905, 0.01738554798066616, -0.013240648433566093, 0.008289797231554985, -0.0014823769452050328, 0.03661327064037323, 0.06723947077989578, -0.02118503861129284, 0.002317113569006324, 0.00984413456171751, 0.02199099026620388, 0.013528488576412201, -0.0031230661552399397, 0....
[ "ModelTalkerOutputWithPast", "MoeCausalLMOutputWithPast", "None", "class", "generation_step", "r" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerRotaryEmbedding
[ -0.0002759863855317235, 0.036963898688554764, 0.0014110032934695482, -0.002500327071174979, -0.0016010865801945329, 0.06878092139959335, 0.05918902903795242, -0.012984153814613819, 0.000665291678160429, 0.011638948693871498, -0.009007025510072708, 0.008539128117263317, -0.0037724231369793415...
[ "ModelTalkerRotaryEmbedding", "ModelThinkerTextRotaryEmbedding", "class", "pass" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerTextMLP
[ -0.00025586149422451854, 0.032058365643024445, 0.02237166464328766, 0.016259819269180298, -0.0010955196339637041, 0.04843350127339363, 0.04474332928657532, -0.019142765551805496, -0.004670373164117336, -0.008706498891115189, 0.028829464688897133, -0.023524843156337738, -0.0028973612934350967...
[ "ACT2FN", "Linear", "ModelTalkerTextMLP", "Module", "None", "__init__", "act_fn", "class", "config", "def", "down_proj", "else", "forward", "gate_proj", "hidden_act", "hidden_size", "if", "intermediate_size", "is", "nn", "return", "self", "super", "up_proj", "x" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerTextExperts
[ -0.00038064271211624146, 0.04216350242495537, -0.005738920997828245, -0.009838149882853031, -0.0014713305281475186, 0.05317286029458046, 0.06839856505393982, -0.021081751212477684, -0.006470926105976105, -0.022370079532265663, 0.022018717601895332, -0.011185039766132832, -0.00264985882677137...
[ "ACT2FN", "ModelTalkerTextExperts", "Module", "None", "Parameter", "__init__", "act_fn", "chunk", "class", "config", "continue", "current_hidden_states", "current_state", "def", "dim", "down_proj", "dtype", "empty", "expert_hit", "expert_idx", "expert_mask", "final_hidden_s...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerTextTopKRouter
[ -0.0003849716449622065, 0.042674750089645386, 0.005186986178159714, -0.007161577697843313, -0.0016430368414148688, 0.05375603958964348, 0.08252023905515671, -0.03112191893160343, -0.0036397320218384266, 0.017918679863214493, 0.0262885894626379, -0.016857706010341644, -0.0021072132512927055, ...
[ "F", "ModelTalkerTextTopKRouter", "Module", "Parameter", "True", "__init__", "class", "config", "def", "dim", "dtype", "float", "forward", "functional", "hidden_dim", "hidden_size", "hidden_states", "if", "keepdim", "linear", "nn", "norm_topk_prob", "num_experts", "num_...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerTextSparseMoeBlock
[ -0.00045640370808541775, 0.03182210028171539, 0.012823831290006638, -0.000048469701141584665, -0.0017439816147089005, 0.04725819081068039, 0.045358363538980484, -0.049158018082380295, -0.005165154114365578, -0.021254312247037888, 0.029447315260767937, -0.018523311242461205, -0.00102412537671...
[ "F", "Linear", "ModelTalkerTextExperts", "ModelTalkerTextMLP", "ModelTalkerTextSparseMoeBlock", "ModelTalkerTextTopKRouter", "Module", "_", "__init__", "batch_size", "class", "config", "def", "expert_output", "experts", "forward", "gate", "hidden_dim", "hidden_size", "hidden_st...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerDecoderLayer
[ -0.00034302720450796187, 0.04575621709227562, 0.018602905794978142, 0.0027297744527459145, -0.0013576655182987452, 0.04113437607884407, 0.053382255136966705, -0.04852932319045067, 0.0007113300962373614, -0.019411727786064148, 0.01606089435517788, 0.016869716346263885, -0.002267590258270502, ...
[ "False", "GradientCheckpointingLayer", "ModelTalkerDecoderLayer", "ModelTalkerTextSparseMoeBlock", "ModelThinkerTextAttention", "ModelThinkerTextMLP", "ModelThinkerTextRMSNorm", "ModelThinkerTextSparseMoeBlock", "None", "Tensor", "_", "__init__", "and", "attention_mask", "cache_position"...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerModel
[ -0.00019286191673018038, 0.041864365339279175, -0.004095427226275206, 0.003811022499576211, -0.0009669758146628737, 0.038906555622816086, 0.06643693149089813, -0.03845151141285896, -0.0006825712043792009, -0.00314267142675817, 0.03481113165616989, 0.017974374815821648, -0.0031853322871029377...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelPreTrainedModel", "ModelTalkerDecoderLayer", "ModelTalkerModel", "ModelTalkerRotaryEmbedding", "ModelTalkerTextConfig", "ModelTalkerTextSparseMoeBlock", "ModelTextConfig", "ModelTextRMSNorm", "ModelThinkerTextAttention", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeTalkerForConditionalGeneration
[ -0.0002997866540681571, 0.02750813402235508, -0.010113284923136234, 0.010228864848613739, -0.0010907900286838412, 0.04438287019729614, 0.06241341307759285, -0.038603853434324265, -0.007686096243560314, -0.007368250284343958, 0.051317695528268814, 0.004103104118257761, -0.004478740505874157, ...
[ "False", "GenerationMixin", "Linear", "ModelPreTrainedModelForConditionalGeneration", "ModelTalkerCodePredictorModelForConditionalGeneration", "ModelTalkerConfig", "ModelTalkerForConditionalGeneration", "ModelTalkerModel", "ModelTalkerOutputWithPast", "ModelTalkerResizeMLP", "ModelTalkerTextSpar...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCausalConvNet
[ -0.00012881754082627594, 0.013190915808081627, 0.0142143489792943, -0.013361488468945026, -0.0001838981406763196, 0.03638873249292374, 0.0008670752868056297, -0.047987643629312515, 0.02251552976667881, 0.012053768150508404, 0.037525881081819534, -0.02342524752020836, 0.00282865553162992, 0...
[ "Conv1d", "F", "ModelCausalConvNet", "Module", "__init__", "_get_extra_padding_for_conv1d", "ceil", "class", "constant", "contiguous", "conv", "def", "dilation", "extra_padding", "forward", "groups", "hidden_state", "ideal_length", "in_channels", "kernel_size", "length", "m...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCausalTransConvNet
[ -0.0002699531032703817, 0.035698167979717255, 0.04508037865161896, -0.02242577262222767, -0.0012442871229723096, 0.03661350533366203, 0.009553836658596992, -0.07093866914510727, 0.009954296983778477, 0.023455526679754257, 0.020709514617919922, -0.023112274706363678, -0.001280042459256947, ...
[ "ConvTranspose1d", "ModelCausalTransConvNet", "Module", "__init__", "ceil", "class", "contiguous", "conv", "def", "forward", "hidden_state", "in_channels", "kernel_size", "left_pad", "math", "nn", "out_channels", "pad", "return", "right_pad", "self", "shape", "stride", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeConvNeXtBlock
[ -0.0001233784860232845, 0.018519198521971703, 0.023631861433386803, -0.008861947804689407, -0.0006674864562228322, 0.02647222951054573, 0.02033703401684761, -0.027040302753448486, 0.009657250717282295, -0.0013065692037343979, 0.013406536541879177, -0.01107743475586176, 0.002570532960817218, ...
[ "GELU", "LayerNorm", "Linear", "ModelCausalConvNet", "ModelConvNeXtBlock", "Module", "Parameter", "__init__", "act", "class", "def", "dilation", "dim", "dwconv", "eps", "forward", "gamma", "groups", "hidden_states", "input", "kernel_size", "nn", "norm", "ones", "permu...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCode2WavAttention
[ -0.00010994606418535113, 0.03625229373574257, 0.019702333956956863, -0.010864430107176304, -0.00041867460822686553, 0.011708815582096577, 0.052464500069618225, -0.016887715086340904, -0.0007388375233858824, -0.013397587463259697, 0.04638492316007614, 0.051788993179798126, -0.0039967591874301...
[ "ALL_ATTENTION_FUNCTIONS", "Identity", "Linear", "ModelCode2WavAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "c...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCode2WavMlp
[ -0.00020316986774560064, 0.03741177171468735, 0.012318510562181473, 0.009467002935707569, -0.0007128767319954932, 0.026576045900583267, 0.04927404224872589, -0.00781312957406044, -0.0038495345506817102, -0.03170875832438469, 0.06022382900118828, -0.009238882921636105, -0.005161227658390999, ...
[ "ACT2FN", "Linear", "ModelCode2WavMlp", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCode2WavRMSNorm
[ -0.00010198439849773422, 0.04546394199132919, 0.01980607397854328, 0.04793969914317131, -0.00038683737511746585, 0.01597990095615387, 0.035335835069417953, -0.028358696028590202, 0.006161264143884182, 0.01305400300770998, 0.044563665986061096, 0.02104395255446434, -0.001111278310418129, 0....
[ "ModelCode2WavRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "dtype", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "input_dtype", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "su...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCode2WavLayerScale
[ -0.00009335677168564871, 0.030888762325048447, -0.003480622312054038, 0.024012066423892975, -0.0005988925113342702, 0.019277293235063553, 0.050504252314567566, -0.020630085840821266, 0.0035088055301457644, 0.014880717732012272, 0.016909906640648842, -0.019390026107430458, -0.0026210355572402...
[ "ModelCode2WavLayerScale", "Module", "Parameter", "True", "__init__", "channels", "class", "config", "def", "forward", "full", "hidden_size", "initial_scale", "layer_scale_initial_scale", "nn", "requires_grad", "return", "scale", "self", "super", "torch", "x" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCode2WavTransformerLayer
[ -0.0001233069779118523, 0.024689579382538795, 0.019052689895033836, -0.003170750802382827, -0.00042100524296984076, 0.022660300135612488, 0.04148751497268677, -0.019390903413295746, 0.001987003954127431, -0.02277303673326969, 0.03833085671067238, 0.049830112606287, -0.006679715123027563, 0...
[ "False", "GradientCheckpointingLayer", "ModelCode2WavAttention", "ModelCode2WavLayerScale", "ModelCode2WavMlp", "ModelCode2WavRMSNorm", "ModelCode2WavTransformerLayer", "None", "_", "__init__", "attention_mask", "attention_type", "cache_position", "class", "config", "def", "forward",...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCode2WavTransformerModel
[ -0.0000930194728425704, 0.03908924013376236, 0.009154519997537136, -0.022240428254008293, -0.0002966092433780432, 0.013703698292374611, 0.041785046458244324, -0.027295071631669998, 0.004184121266007423, -0.034371573477983475, 0.04335760325193405, 0.03998784348368645, -0.005363537929952145, ...
[ "BaseModelOutputWithPast", "DynamicCache", "False", "ModelCode2WavAttention", "ModelCode2WavTransformerLayer", "ModelCode2WavTransformerModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "_can_record_outputs", ...
qwen3_omni_moe/modeling_qwen3_omni_moe.py:SnakeBeta
[ -0.00026887215790338814, 0.0537886768579483, 0.05333283916115761, -0.024273280054330826, -0.0009900192962959409, 0.026210583746433258, 0.026438502594828606, 0.0217661801725626, 0.00257832370698452, 0.010484233498573303, 0.036238983273506165, -0.046951133757829666, 0.0028062418568879366, 0....
[ "ModelBeta", "Module", "Parameter", "__init__", "alpha", "beta", "class", "def", "exp", "forward", "hidden_states", "in_features", "nn", "no_div_by_zero", "pow", "return", "self", "sin", "super", "torch", "unsqueeze", "zeros" ]
qwen3_omni_moe/modeling_qwen3_omni_moe.py:Qwen3OmniMoeCode2WavDecoderResidualUnit
[ -0.00008919433457776904, 0.024077173322439194, 0.016729678958654404, -0.04182419553399086, 0, 0.031424667686223984, 0.024868441745638847, -0.04024165868759155, 0.010116933844983578, -0.015599294565618038, 0.035041894763708115, -0.019103484228253365, -0.000031350486096926033, 0.011529913172...
[ "ModelCausalConvNet", "ModelCode2WavDecoderResidualUnit", "Module", "SnakeBeta", "__init__", "act1", "act2", "class", "conv1", "conv2", "def", "dilation", "dim", "forward", "hidden_state", "kernel_size", "nn", "residual", "return", "self", "super" ]