Columns (each record below spans three lines, in this order):

  identifier   string, 24-102 chars, of the form "<module path>:<class or function name>"
  embedding    list of floats, fixed length (2.56k)
  tokens       list of strings, 4-448 items (the identifiers and keywords occurring in the object, in sorted order)
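Each record pairs a code object with a fixed-length embedding vector and the vocabulary of tokens that appear in its source. As a minimal sketch of how the three columns fit together, the snippet below ranks records by embedding similarity to a query record. It assumes the dump has been exported as JSON Lines with exactly these three fields; the file name "code_embeddings.jsonl" and the cosine helper are illustrative, not part of the dataset.

  import json
  import math

  def cosine(a, b):
      # Cosine similarity between two embedding vectors (lists of floats).
      dot = sum(x * y for x, y in zip(a, b))
      norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b))
      return dot / norm

  # Assumed export format: one JSON object per line with the three columns above.
  records = []
  with open("code_embeddings.jsonl") as f:  # hypothetical file name
      for line in f:
          r = json.loads(line)
          assert set(r) == {"identifier", "embedding", "tokens"}
          records.append(r)

  # Rank every other record by embedding similarity to the first one.
  query = records[0]
  ranked = sorted(records[1:],
                  key=lambda r: cosine(query["embedding"], r["embedding"]),
                  reverse=True)
  for r in ranked[:5]:
      print(r["identifier"])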
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaSelfAttention
[ -0.00023192334629129618, 0.03217891976237297, 0.037390999495983124, -0.0008674995042383671, -0.0007435710285790265, 0.016202766448259354, 0.030819248408079147, -0.02968618832528591, 0.00023369374684989452, 0.032632146030664444, 0.007364893797785044, 0, -0.0012038769200444221, -0.0151830120...
[ "ALL_ATTENTION_FUNCTIONS", "Dropout", "EncoderDecoderCache", "False", "Linear", "Model", "Module", "None", "The", "ValueError", "__init__", "_attn_implementation", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_interface", "attention_mask", "attenti...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaCrossAttention
[ -0.00028904902865178883, 0.033091623336076736, 0.04527118057012558, 0.0025709206238389015, -0.0012351908953860402, 0.02343990094959736, 0.03561945632100105, -0.03010418638586998, -0.0009263931424356997, 0.028035959228873253, 0.006922814063727856, 0.0004578105581458658, -0.0007001808844506741...
[ "ALL_ATTENTION_FUNCTIONS", "Dropout", "False", "Linear", "Model", "Module", "None", "The", "True", "ValueError", "__init__", "_attn_implementation", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_interface", "attention_mask", "attention_probs_dropou...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaSelfOutput
[ -0.0002157260460080579, 0.03423091024160385, 0.053856633603572845, 0.030579613521695137, -0.0008272469858638942, 0.049520716071128845, 0.019625721499323845, -0.017685970291495323, 0.001511865179054439, 0.023733431473374367, 0.021451370790600777, -0.00747374864295125, 0.0025102668441832066, ...
[ "Dropout", "LayerNorm", "Linear", "Model", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaAttention
[ -0.000021539643057622015, 0.04343452304601669, 0.0282776840031147, 0.008596416562795639, -0.00009234431490767747, 0.03981497883796692, 0.03777898848056793, -0.019115714356303215, 0.011706961318850517, 0.007804641034454107, 0.018663272261619568, 0.019115714356303215, 0.001618897425942123, -...
[ "False", "Model", "ModelCrossAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "__init__", "attention_class", "attention_mask", "attention_output", "attn_weights", "cache_position", "class", "config", "def", "else", "encoder_attention_mask", "encoder_hidden_s...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaIntermediate
[ -0.00024672862491570413, 0.024029221385717392, 0.0379890538752079, 0.019795501604676247, -0.0009654597961343825, 0.03089471347630024, 0.03570055589079857, -0.01762142963707447, -0.0008653380209580064, -0.0033612302504479885, 0.027004268020391464, -0.03089471347630024, -0.00003039410512428730...
[ "ACT2FN", "Linear", "Model", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaOutput
[ -0.00023025262635201216, 0.0370117723941803, 0.050491366535425186, 0.033584754914045334, -0.0009781274711713195, 0.041124191135168076, 0.02456028014421463, -0.017820483073592186, -0.00043730149627663195, 0.01667814329266548, 0.019419755786657333, -0.007368084043264389, 0.0014707610243931413,...
[ "Dropout", "LayerNorm", "Linear", "Model", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "layer_norm_eps", "nn", "return", "self", "super" ]
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaLayer
[ -0.0001149694508058019, 0.03796975687146187, 0.023028995841741562, 0.004493462387472391, -0.0003949332167394459, 0.03167891129851341, 0.026736101135611534, -0.01741216704249382, 0.0037071064580231905, 0.003931779880076647, 0.00966094434261322, 0.012806368060410023, 0.003047129139304161, -0...
[ "False", "GradientCheckpointingLayer", "If", "Model", "ModelAttention", "ModelIntermediate", "ModelOutput", "None", "True", "ValueError", "_", "__init__", "add_cross_attention", "and", "apply_chunking_to_forward", "are", "attention", "attention_mask", "attention_output", "be", ...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaLMHead
[ -0.00009366370795760304, 0.05718674883246422, 0.009512363001704216, 0.04007575288414955, -0.0003535475116223097, 0.01508469320833683, 0.05155813321471214, -0.02251446805894375, 0.0054316152818500996, 0.025216205045580864, 0.018574437126517296, 0.005741189233958721, 0.0002040373656200245, 0...
[ "LayerNorm", "Linear", "Model", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "dense", "eps", "features", "forward", "gelu", "hidden_size", "kwargs", "layer_norm", "layer_norm_eps", "nn", "return", "self", "super", "torch", "vocab_s...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaPreTrainedModel
[ -0.0002077890676446259, 0.025915026664733887, -0.006393509916961193, -0.009718134999275208, -0.0009874420939013362, 0.039327189326286316, 0.015458086505532265, 0.005285301711410284, 0.0009235069737769663, -0.01341216266155243, 0.0020032997708767653, 0.012559695169329643, 0.000390714500099420...
[ "Model", "ModelConfig", "ModelCrossAttention", "ModelEmbeddings", "ModelLMHead", "ModelLayer", "ModelSelfAttention", "PreTrainedModel", "True", "_can_record_outputs", "_init_weights", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supports_sdpa", "arang...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaEncoder
[ 0.000011223537512705661, 0.012675995007157326, 0.01836610771715641, 0.02253510244190693, 0.00019102019723504782, 0.036056164652109146, 0.02873225510120392, -0.02850690484046936, 0.009859107434749603, -0.0006337997619993985, 0.018704134970903397, -0.01723935268819332, -0.00007878483302192762,...
[ "BaseModelOutputWithPastAndCrossAttentions", "Model", "ModelLayer", "Module", "ModuleList", "None", "__init__", "attention_mask", "cache_position", "class", "config", "def", "else", "encoder_attention_mask", "encoder_hidden_states", "enumerate", "for", "forward", "hidden_states",...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaPooler
[ -0.0002529282064642757, 0.02325514517724514, 0.04012652486562729, 0.005927782505750656, -0.000983213889412582, 0.023939121514558792, 0.042178452014923096, -0.01869531348347664, -0.002864144742488861, 0.0018666815012693405, 0.01960727944970131, -0.015047447755932808, -0.0006661630468443036, ...
[ "Linear", "Model", "Module", "Tanh", "__init__", "activation", "class", "config", "def", "dense", "first_token_tensor", "forward", "hidden_size", "hidden_states", "nn", "pooled_output", "return", "self", "super" ]
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaModel
[ 0.000043793381337309256, 0.016621440649032593, 0.01896405965089798, 0.011434212327003479, 0.00018650316633284092, 0.0330197736620903, 0.0021334567572921515, -0.019298719242215157, 0.007920283824205399, -0.009426252916455269, 0.013609501533210278, -0.003235045587643981, 0.0006449174834415317,...
[ "BaseModelOutputWithPoolingAndCrossAttentions", "DynamicCache", "EncoderDecoderCache", "False", "Model", "ModelEmbeddings", "ModelEncoder", "ModelLayer", "ModelPooler", "ModelPreTrainedModel", "None", "True", "ValueError", "You", "__init__", "_create_attention_masks", "_no_split_modu...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaForCausalLM
[ -0.0002566452312748879, 0.04562582075595856, 0.015056520700454712, -0.01100722886621952, -0.0012333228951320052, 0.03832568973302841, 0.020873812958598137, -0.004220388364046812, 0.0012903552269563079, 0.011121293529868126, 0.014258068986237049, 0.014486197382211685, -0.0016895810840651393, ...
[ "CausalLMOutputWithCrossAttentions", "False", "GenerationMixin", "Model", "ModelLMHead", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_tied_weights_keys", "add", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "bias", "can_return_tuple",...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaForMaskedLM
[ -0.00009274150215787813, 0.0362846739590168, 0.013326779007911682, -0.018590295687317848, -0.000528451579157263, 0.04793160781264305, 0.013942722231149673, -0.0006754380883648992, 0.005991450976580381, 0, 0.017806367948651314, 0.0302372295409441, -0.0018898268463090062, 0.00249177170917391...
[ "CrossEntropyLoss", "False", "If", "MaskedLMOutput", "Model", "ModelLMHead", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_tied_weights_keys", "add_pooling_layer", "attention", "attention_mask", "attentions", "auto_docstring", "bi", "bias", "can_return_tup...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaClassificationHead
[ -0.00029410512070171535, 0.03718914836645126, 0.037645455449819565, 0.003878622781485319, -0.0009767855517566204, 0.022130966186523438, 0.054985180497169495, 0.0007129821460694075, -0.0043919701129198074, 0.0003083647752646357, 0.022359119728207588, -0.008213554508984089, -0.0004135296330787...
[ "Dropout", "Linear", "Model", "Module", "None", "__init__", "class", "classifier_dropout", "config", "def", "dense", "dropout", "else", "features", "forward", "hidden_dropout_prob", "hidden_size", "if", "is", "kwargs", "nn", "not", "num_labels", "out_proj", "return", ...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaForSequenceClassification
[ -0.0003551274130586535, 0.028496719896793365, 0.01361381821334362, 0.005768566858023405, -0.0013844560598954558, 0.04776373505592346, 0.01753644272685051, 0.03253471851348877, -0.0008400475489906967, 0.009345078840851784, 0.040149226784706116, 0.006431952118873596, 0.001117659849114716, 0....
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "False", "MSELoss", "Model", "ModelClassificationHead", "ModelModel", "ModelPreTrainedModel", "None", "SequenceClassifierOutput", "True", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "can_return_t...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaForMultipleChoice
[ -0.0002957839460577816, 0.04570944234728813, 0.01523648016154766, 0.02400900050997734, -0.0014500627294182777, 0.04940313473343849, 0.040861472487449646, 0.013620490208268166, 0.0017674894770607352, 0.01662161573767662, 0.031627241522073746, -0.004357402678579092, -0.0009739227825775743, -...
[ "CrossEntropyLoss", "Dropout", "False", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "MultipleChoiceModelOutput", "None", "True", "__init__", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "can_return_tuple", "class", "classifier", "config", ...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaForTokenClassification
[ -0.00026297062868252397, 0.036408014595508575, 0.014654797501862049, -0.015456232242286205, -0.0011663730256259441, 0.047628093510866165, 0.038468845188617706, 0.032515332102775574, -0.0020322082564234734, 0.012479476630687714, 0.0396137498319149, 0.006812191102653742, -0.000830056902486831,...
[ "CrossEntropyLoss", "Dropout", "False", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "TokenClassifierOutput", "True", "__init__", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "can_return_tuple", "class", "classifier", "classifier_drop...
xlm_roberta/modeling_xlm_roberta.py:XLMRobertaForQuestionAnswering
[ -0.00023668304493185133, 0.025625834241509438, 0.013040701858699322, 0.01583107002079487, -0.0010535065084695816, 0.059224147349596024, 0.028017578646540642, 0.034851133823394775, 0.004755015950649977, 0.033939994871616364, 0.019703418016433716, 0.02209516242146492, 0.0000867541239131242, ...
[ "CrossEntropyLoss", "False", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "QuestionAnsweringModelOutput", "True", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "can_return_tuple", "clamp", "class", "config", "conti...
lasr/modeling_lasr.py:LasrEncoderSubsampling
[ -0.00005581923687714152, 0.031729940325021744, 0.02070322260260582, 0.014627277851104736, 0, 0.013277066871523857, 0.01665259338915348, -0.044556938111782074, 0.009676506742835045, 0.0028410672675818205, 0.01609000563621521, 0.00464134756475687, 0.0004254568775650114, -0.006385369226336479...
[ "Conv1d", "Linear", "ModelEncoderSubsampling", "Module", "ReLU", "__init__", "act_fn", "class", "config", "conv_0", "conv_1", "def", "dense_0", "dense_1", "forward", "hidden_size", "hidden_states", "input_features", "kernel_size", "nn", "num_mel_bins", "return", "self", ...
lasr/modeling_lasr.py:LasrEncoderRotaryEmbedding
[ -0.0002727950632106513, 0.042728323489427567, 0.0020244265906512737, -0.006087637506425381, -0.0012634719023481011, 0.04066082462668419, 0.030323326587677002, 0.0007789017399773002, -0.004278575535863638, 0.024924855679273605, -0.003991422709077597, 0.004939026664942503, -0.00206749956123530...
[ "False", "ModelEncoderRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "de...
lasr/modeling_lasr.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
lasr/modeling_lasr.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
lasr/modeling_lasr.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
lasr/modeling_lasr.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
lasr/modeling_lasr.py:LasrEncoderAttention
[ -0.00014656664279755205, 0.03187029808759689, 0.031192205846309662, -0.0007416625157929957, -0.0007663845899514854, 0.03390457108616829, 0.039103273302316666, -0.012544691562652588, 0.0024863353464752436, 0.015370072796940804, 0.007459005806595087, 0.035938847810029984, -0.002020147396251559...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelEncoderAttention", "Module", "None", "Tensor", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "class", "config", "contiguous...
lasr/modeling_lasr.py:LasrEncoderConvolutionModule
[ -0.000036224180803401396, 0.023522745817899704, 0.02612381987273693, -0.014418991282582283, 0.00010337144340155646, 0.031439054757356644, -0.01136555802077055, -0.05383089929819107, 0.010178111493587494, 0.008877575397491455, 0.03912918269634247, 0.003109978511929512, 0.0025303915608674288, ...
[ "ACT2FN", "BatchNorm1d", "Conv1d", "ModelEncoderConvolutionModule", "Module", "None", "__init__", "activation", "all", "all_masked_rows", "attention_mask", "batch_norm_momentum", "bool", "channels", "class", "config", "conv_kernel_size", "def", "depthwise_conv", "dim", "else"...
lasr/modeling_lasr.py:LasrEncoderFeedForward
[ -0.00018150084360968322, 0.0453253872692585, 0.041453368961811066, -0.0017224785406142473, -0.0009181807399727404, 0.03188720718026161, 0.029040133580565453, -0.03917571157217026, 0.004299078602343798, -0.012470175512135029, 0.03120390884578228, -0.003388015553355217, 0.003644252195954323, ...
[ "ACT2FN", "Linear", "ModelEncoderFeedForward", "Module", "__init__", "activation", "activation_dropout", "class", "config", "def", "dropout", "forward", "functional", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "linear1", "linear2", "nn", "p", "return...
lasr/modeling_lasr.py:LasrEncoderBlock
[ -0.00011624776379903778, 0.009370812214910984, 0.02283070608973503, -0.0108474250882864, -0.00039577484130859375, 0.04952332377433777, 0.020558994263410568, -0.03952778875827789, 0.012835172936320305, -0.002939027501270175, 0.00761023536324501, 0.017492182552814484, 0.003492757212370634, 0...
[ "False", "GradientCheckpointingLayer", "LayerNorm", "ModelEncoderAttention", "ModelEncoderBlock", "ModelEncoderConvolutionModule", "ModelEncoderFeedForward", "None", "_", "__init__", "attention_mask", "attn_output", "class", "config", "conv", "conv_output", "conv_residual_weights", ...
lasr/modeling_lasr.py:LasrPreTrainedModel
[ -0.0002301838103448972, 0.03882790356874466, 0.01610216125845909, 0.0036543910391628742, -0.00117768463678658, 0.026380134746432304, 0.012048070318996906, -0.05321706831455231, -0.005995485465973616, -0.01678735949099064, 0.023525143042206764, 0.012961667962372303, -0.0026551433838903904, ...
[ "False", "ModelCTCConfig", "ModelEncoderAttention", "ModelEncoderBlock", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "_", "_can_compile_fullgraph", "_can_record_outputs", "_get_output_attention_mask", "_get_subsampling_output_length", "_init_weights", "_no_split_modules", ...
lasr/modeling_lasr.py:LasrEncoder
[ -0.00020552414935082197, 0.04467669874429703, 0.012076315470039845, 0.003614390268921852, -0.0010772299719974399, 0.03515171632170677, 0.021091029047966003, -0.02857494354248047, -0.003656912362203002, -0.008787929080426693, 0.011452656239271164, 0.006576772779226303, -0.002508812118321657, ...
[ "BaseModelOutput", "False", "LayerNorm", "ModelEncoder", "ModelEncoderBlock", "ModelEncoderConfig", "ModelEncoderRotaryEmbedding", "ModelEncoderSubsampling", "ModelPreTrainedModel", "ModuleList", "None", "True", "__init__", "_get_output_attention_mask", "arange", "attention_mask", "a...
lasr/modeling_lasr.py:LasrGenerateOutput
[ -0.00014792144065722823, 0.0064803678542375565, -0.01391870342195034, 0.017581520602107048, -0.0005071592167951167, 0.03381061553955078, 0.05635102465748787, -0.01228452380746603, 0.0037755186203867197, 0.0011270204558968544, 0.017243413254618645, 0.00039269620901905, -0.007043878082185984, ...
[ "LongTensor", "ModelGenerateOutput", "ModelOutput", "None", "attentions", "class", "hidden_states", "logits", "sequences", "torch" ]
lasr/modeling_lasr.py:LasrForCTC
[ -0.0002748770930338651, 0.03152875974774361, 0.027759017422795296, -0.003998212516307831, -0.0009352961205877364, 0.029244067147374153, 0.03289957717061043, -0.03609814494848251, 0.0013279777485877275, -0.008796066977083683, 0.03861130774021149, 0.003684067167341709, -0.0008067821618169546, ...
[ "CausalLMOutput", "Conv1d", "False", "ModelCTCConfig", "ModelEncoder", "ModelForCTC", "ModelGenerateOutput", "ModelPreTrainedModel", "None", "True", "__init__", "_get_output_attention_mask", "_get_subsampling_output_length", "argmax", "attention_mask", "attentions", "auto_docstring",...
llama4/modeling_llama4.py:Llama4TextExperts
[ -0.0004028682305943221, 0.027902763336896896, 0.0065341913141310215, 0.01660037785768509, -0.001721847802400589, 0.05180259793996811, 0.04991886764764786, -0.02319343574345112, -0.00497422693297267, -0.001677697873674333, 0.02178063802421093, -0.025548100471496582, -0.0013318565906956792, ...
[ "ACT2FN", "ModelTextExperts", "Module", "Parameter", "__init__", "act_fn", "bmm", "chunk", "class", "config", "def", "dim", "down_proj", "empty", "expert_dim", "forward", "gate", "gate_up", "gate_up_proj", "hidden_act", "hidden_size", "hidden_states", "intermediate_size",...
llama4/modeling_llama4.py:Llama4TextMLP
[ -0.00021289459255058318, 0.03137243166565895, 0.03045644983649254, 0.0196936447173357, -0.0007657048990949988, 0.05198206007480621, 0.024731554090976715, -0.013911498710513115, -0.0018033423693850636, -0.011163548566401005, 0.033433396369218826, -0.022441593930125237, -0.001932152546942234, ...
[ "ACT2FN", "Linear", "ModelTextMLP", "Module", "None", "__init__", "activation_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "if", "intermediate_size", "is", "nn", "return", "self", "super", "up_proj", "x" ]
llama4/modeling_llama4.py:Llama4TextL2Norm
[ 0.000021420270059024915, 0.03397012874484062, 0.05084271356463432, 0.03846948593854904, 0, 0.04476858302950859, 0.007761387620121241, -0.01473538763821125, 0.012035774998366833, 0.01754748448729515, 0.04161903262138367, 0.022496774792671204, 0.0029245808254927397, 0.015635259449481964, -...
[ "ModelTextL2Norm", "Module", "True", "__init__", "_norm", "class", "def", "eps", "extra_repr", "f", "float", "forward", "keepdim", "mean", "nn", "pow", "return", "rsqrt", "self", "super", "torch", "type_as", "x" ]
llama4/modeling_llama4.py:Llama4TextRMSNorm
[ -0.000034161021176259965, 0.04468525946140289, 0.03249837085604668, 0.05145575478672981, -0.0001939464418683201, 0.04310547932982445, 0.01986011676490307, -0.018618859350681305, 0.00868880096822977, 0.03159563988447189, 0.018280334770679474, 0.01726475916802883, 0.0024543041363358498, 0.01...
[ "ModelTextRMSNorm", "Module", "Parameter", "True", "__init__", "_norm", "class", "def", "eps", "extra_repr", "f", "float", "forward", "hidden_size", "keepdim", "mean", "nn", "ones", "output", "pow", "return", "rsqrt", "self", "shape", "super", "torch", "tuple", ...
llama4/modeling_llama4.py:Llama4Router
[ -0.0004005292721558362, 0.0348905473947525, -0.009138001129031181, -0.0013721835566684604, -0.0017652956303209066, 0.07737632095813751, 0.07452811300754547, -0.021717587485909462, -0.0016095342580229044, 0.013885014690458775, 0.02504049614071846, -0.019937457516789436, 0.00045059542753733695...
[ "Linear", "ModelRouter", "__init__", "class", "config", "def", "dim", "dtype", "float", "forward", "full_like", "functional", "hidden_size", "hidden_states", "inf", "nn", "num_experts", "num_experts_per_tok", "num_local_experts", "return", "router_indices", "router_logits",...
llama4/modeling_llama4.py:Llama4TextMoe
[ -0.0004514032043516636, 0.044409122318029404, 0.005521295592188835, 0.0024323544930666685, -0.0017832291778177023, 0.07210513204336166, 0.05754085257649422, -0.03390374034643173, -0.004536415915936232, -0.0030441738199442625, 0.035336289554834366, -0.022204561159014702, -0.001842918922193348...
[ "ModelRouter", "ModelTextExperts", "ModelTextMLP", "ModelTextMoe", "Module", "__init__", "add_", "class", "config", "def", "dim", "experts", "forward", "hidden_dim", "hidden_size", "hidden_states", "nn", "num_experts", "num_experts_per_tok", "num_local_experts", "out", "rep...
llama4/modeling_llama4.py:Llama4TextRotaryEmbedding
[ -0.00033367943251505494, 0.04834318161010742, 0.00353480177000165, -0.010912417434155941, -0.0018774050986394286, 0.045761749148368835, 0.03989486023783684, -0.017131321132183075, -0.0031094523146748543, 0.0253449697047472, 0.010384396649897099, -0.004253495950251818, -0.0012687151320278645,...
[ "False", "ModelTextRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "class", "clone", "compute_default_rope_parameters", "config", "cpu", "def", "default", "device", "device_type...
llama4/modeling_llama4.py:apply_rotary_emb
[ -0.0001581599353812635, 0.01521194726228714, 0.005861746612936258, -0.000350275106029585, -0.0007255698437802494, 0.03156764805316925, 0.049638986587524414, -0.031796399503946304, 0.012123807333409786, 0.029165763407945633, 0.018528837710618973, 0.014011004008352757, -0.0023161047138273716, ...
[ "Model_rotary_emb", "None", "def", "flatten", "float", "freqs_cis", "reshape", "return", "shape", "torch", "type_as", "view_as_complex", "view_as_real", "xk", "xk_", "xk_out", "xq", "xq_", "xq_out" ]
llama4/modeling_llama4.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
llama4/modeling_llama4.py:eager_attention_forward
[ 0, 0.018217450007796288, 0.018443753942847252, -0.017425386235117912, -0.00008751589484745637, 0.04163988307118416, 0.05499180406332016, -0.03779271990060806, 0.021498853340744972, 0.010127091780304909, 0.029419483616948128, 0.020593637600541115, 0.0025883486960083246, -0.01731223426759243...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p", "query", "rep...
llama4/modeling_llama4.py:vision_eager_attention_forward
[ -0.00015723906108178198, 0.013076604343950748, 0.025925789028406143, 0.0005188000504858792, -0.0006254028412513435, 0.04639351740479469, 0.04844029247760773, -0.03229352831840515, 0.01705644093453884, 0.01921692304313183, 0.03138384968042374, 0.017170149832963943, 0.0030133044347167015, -0...
[ "Model_eager_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "functional", "head_dim", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p...
llama4/modeling_llama4.py:Llama4TextAttention
[ -0.00009558228339301422, 0.04555530846118927, 0.031572986394166946, -0.01116330549120903, -0.0004087574197910726, 0.019845876842737198, 0.048712607473134995, -0.023454217240214348, -0.0019169312436133623, 0.015109929256141186, 0.02717532031238079, 0.029768815264105797, -0.0001347842335235327...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelTextAttention", "ModelTextL2Norm", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "and", "apply_rotary_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_scale", "attn_scales",...
llama4/modeling_llama4.py:Llama4TextDecoderLayer
[ -0.00016419781604781747, 0.05197831988334656, 0.017853422090411186, -0.0027119123842567205, -0.0006991648697294295, 0.03706280142068863, 0.03457688167691231, -0.051752325147390366, 0.005395575426518917, -0.009096206165850163, 0.014407034032046795, 0.02440720982849598, -0.0019491869024932384,...
[ "False", "GradientCheckpointingLayer", "ModelTextAttention", "ModelTextDecoderLayer", "ModelTextMLP", "ModelTextMoe", "ModelTextRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "attention_states", "attention_type", "cache_position", "class", "config", "def", "else",...
llama4/modeling_llama4.py:Llama4PreTrainedModel
[ -0.0003051153034903109, 0.04410890117287636, 0.0004989532171748579, 0.02400718815624714, -0.001299432129599154, 0.03446007892489433, 0.017000306397676468, -0.01539216935634613, -0.00895962119102478, 0.011831293813884258, 0.009821122512221336, 0.00023152866924647242, -0.003589591709896922, ...
[ "False", "ModelConfig", "ModelPreTrainedModel", "ModelTextExperts", "ModelVisionModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_init_weights", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supports_sdpa", "cl...
llama4/modeling_llama4.py:Llama4TextModel
[ -0.00007732728408882394, 0.048091452568769455, 0.010736696422100067, -0.011184059083461761, -0.0005696879816241562, 0.027960145846009254, 0.036683712154626846, -0.032657451927661896, 0.004893025849014521, 0.003299297299236059, 0.023710204288363457, 0.00441770302131772, -0.003718699561432004,...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelPreTrainedModel", "ModelTextAttention", "ModelTextConfig", "ModelTextDecoderLayer", "ModelTextModel", "ModelTextMoe", "ModelTextRMSNorm", "ModelTextRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__i...
llama4/modeling_llama4.py:Llama4ForCausalLM
[ -0.00025303266011178493, 0.045071665197610855, 0.010192085057497025, -0.009172876365482807, -0.0010333640966564417, 0.02536696568131447, 0.03737097606062889, -0.026612665504217148, 0.0037654091138392687, 0.021290132775902748, 0.024913985282182693, 0.0017482256516814232, -0.000651160953566432...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelPreTrainedModel", "ModelTextConfig", "ModelTextDecoderLayer", "ModelTextModel", "None", "__init__", "_no_split_modules", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", ...
llama4/modeling_llama4.py:Llama4CausalLMOutputWithPast
[ -0.00020158913685008883, 0.02637176588177681, 0.02978190779685974, 0.009434727020561695, -0.0011793408775702119, 0.03387407958507538, 0.03773890808224678, -0.030463937669992447, 0.01784641109406948, -0.004205842036753893, 0.020006166771054268, 0.018642110750079155, -0.0008596400148235261, ...
[ "ModelCausalLMOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "image_hidden_states", "logits", "loss", "past_key_values", "r" ]
llama4/modeling_llama4.py:Llama4VisionMLP2
[ -0.0002747578255366534, 0.04179173707962036, 0.019525647163391113, 0.02774697355926037, -0.00046744514838792384, 0.03676759451627731, 0.029117194935679436, -0.02146679349243641, -0.007764585316181183, -0.011532692238688469, 0.04498891904950142, -0.01826961152255535, -0.002726168604567647, ...
[ "F", "GELU", "Linear", "ModelVisionMLP2", "Module", "__init__", "activation_fn", "class", "config", "def", "dropout", "fc1", "fc2", "forward", "hidden_size", "hidden_states", "intermediate_size", "nn", "p", "projector_dropout", "projector_input_dim", "projector_output_dim",...
llama4/modeling_llama4.py:Llama4MultiModalProjector
[ -0.00023902901739347726, 0.029009269550442696, 0.04328726977109909, 0.048046600073575974, -0.0005842931568622589, 0.024929840117692947, 0.031048983335494995, -0.0335419662296772, -0.0013739741407334805, 0.021870268508791924, 0.027762776240706444, -0.00929203163832426, -0.0020255495328456163,...
[ "Linear", "ModelMultiModalProjector", "Module", "__init__", "class", "config", "def", "forward", "hidden_size", "hidden_states", "image_features", "linear_1", "nn", "return", "self", "super", "text_config", "vision_config", "vision_output_dim" ]
llama4/modeling_llama4.py:pixel_shuffle
[ 0.00008654829434817657, 0.03162013739347458, 0.0320654921233654, -0.01469668373465538, 0.0005566925392486155, 0.014362667687237263, -0.01937290094792843, -0.03585100173950195, 0.005010233260691166, 0.039859186857938766, -0.00019745189638342708, -0.0367417074739933, -0.0018370854668319225, ...
[ "Model_shuffle", "batch_size", "channels", "contiguous", "def", "height", "input_tensor", "int", "math", "num_patches", "output_tensor", "patch_size", "permute", "reshaped_tensor", "return", "shape", "shuffle_ratio", "size", "sqrt", "view", "width" ]
llama4/modeling_llama4.py:Llama4VisionPixelShuffleMLP
[ -0.0002189454680774361, 0.03412724286317825, 0.0017445010598748922, 0.029155060648918152, -0.0007486522663384676, 0.03390123322606087, 0.02384386770427227, -0.054919999092817307, 0.0021894546225667, 0.006667242851108313, 0.02203580178320408, -0.032771192491054535, -0.001836316892877221, -0...
[ "ModelVisionMLP2", "ModelVisionPixelShuffleMLP", "Module", "__init__", "class", "config", "def", "encoded_patches", "forward", "inner_dim", "int", "mlp", "nn", "output_dim", "pixel_shuffle", "pixel_shuffle_ratio", "projector_input_dim", "projector_output_dim", "return", "self",...
llama4/modeling_llama4.py:reshape_for_broadcast
[ -0.00016789128130767494, -0.011147263459861279, -0.004079668316990137, 0.014479950070381165, -0.0005099585396237671, 0.016663433983922005, 0.05125442519783974, -0.042980168014764786, 0.019996121525764465, 0.03171798586845398, 0.02355864830315113, 0.009998060762882233, 0.00042197274160571396,...
[ "Model_for_broadcast", "d", "def", "else", "enumerate", "for", "freqs_ci", "i", "if", "in", "ndim", "or", "query", "return", "shape", "view" ]
llama4/modeling_llama4.py:vision_apply_rotary_emb
[ -0.0000810419223853387, 0.013037564232945442, 0.011166957207024097, -0.014624745585024357, -0.0005207940121181309, 0.03174363449215889, 0.061673346906900406, -0.011960548348724842, 0.01428463589400053, 0.031063413247466087, 0.019272921606898308, 0.01264076866209507, -0.0018847782630473375, ...
[ "Model_apply_rotary_emb", "def", "device", "flatten", "float", "freqs_ci", "key", "key_", "key_out", "query", "query_", "query_out", "reshape", "reshape_for_broadcast", "return", "shape", "to", "torch", "type_as", "view_as_complex", "view_as_real" ]
llama4/modeling_llama4.py:Llama4VisionAttention
[ -0.00011382484080968425, 0.032527435570955276, 0.023379093036055565, 0.019087279215455055, -0.0004870644479524344, 0.023943806067109108, 0.054890044033527374, -0.01795785501599312, 0.0038965155836194754, 0.03297920525074005, 0.032753318548202515, 0.02462146058678627, -0.000988246756605804, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelVisionAttention", "Module", "None", "__init__", "_attn_implementation", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "class", "config", "contiguous", "def", "dropout", "else", "e...
llama4/modeling_llama4.py:Llama4VisionMLP
[ -0.00014230338274501264, 0.03938957676291466, 0.025614608079195023, 0.041438743472099304, -0.00027927037444896996, 0.042121801525354385, 0.03506355360150337, -0.02982678823173046, 0.0035148935858160257, 0.002476078923791647, 0.046675510704517365, -0.01582413539290428, 0.0004446980601642281, ...
[ "GELU", "Linear", "ModelVisionMLP", "Module", "__init__", "activation_fn", "class", "config", "def", "fc1", "fc2", "forward", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
llama4/modeling_llama4.py:Llama4VisionEncoderLayer
[ -0.00002215261156379711, 0.03481249883770943, 0.027513103559613228, 0.03166814520955086, 0.00018950352387037128, 0.04042741656303406, 0.032566532492637634, -0.01953991875052452, 0.006007963325828314, 0.026165522634983063, 0.024930240586400032, 0.02436874993145466, 0.0018529233057051897, -0...
[ "GradientCheckpointingLayer", "LayerNorm", "ModelVisionAttention", "ModelVisionEncoderLayer", "ModelVisionMLP", "None", "__init__", "attention_mask", "attn_weights", "class", "config", "def", "forward", "freqs_ci", "hidden_size", "hidden_state", "if", "input_layernorm", "mlp", ...
llama4/modeling_llama4.py:Llama4VisionEncoder
[ -0.00009992382547352463, 0.032003797590732574, 0.013015628792345524, 0.047329556196928024, -0.0001901633950183168, 0.035609856247901917, 0.029524628072977066, -0.04372349753975868, 0.009409566409885883, 0.01893182285130024, 0.03403220698237419, 0.007719225715845823, 0.001690341392531991, -...
[ "BaseModelOutput", "False", "ModelVisionEncoder", "ModelVisionEncoderLayer", "Module", "ModuleList", "None", "_", "__init__", "all_attentions", "attention_mask", "attentions", "class", "config", "def", "else", "encoder_layer", "encoder_states", "for", "forward", "freqs_ci", ...
llama4/modeling_llama4.py:Llama4UnfoldConvolution
[ -0.00012614573643077165, 0.022921472787857056, 0.02958337962627411, -0.0022723875008523464, -0.000589268165640533, 0.004036663565784693, 0.012759244069457054, -0.052391938865184784, 0.015017516911029816, 0.00046224030666053295, -0.014791689813137054, -0.01817910000681877, -0.0008715523290447...
[ "Linear", "ModelUnfoldConvolution", "Module", "Unfold", "__init__", "class", "config", "def", "forward", "hidden_size", "hidden_states", "if", "int", "isinstance", "kernel_size", "linear", "nn", "num_channels", "patch_size", "permute", "return", "self", "stride", "super...
llama4/modeling_llama4.py:Llama4VisionRotaryEmbedding
[ -0.00024002166173886508, 0.032088227570056915, 0.021961092948913574, 0.01797851175069809, -0.0011307686800137162, 0.021050788462162018, 0.04528764262795448, -0.03368126228451729, 0.006770388688892126, 0.0314055010676384, 0.010923651978373528, 0.00904614944010973, -0.00009289727313444018, 0...
[ "ModelVisionRotaryEmbedding", "Module", "None", "__init__", "arange", "cat", "class", "config", "contiguous", "cos", "def", "device", "dim", "dtype", "float", "forward", "freq_cis", "freq_dim", "freqs", "freqs_ci", "freqs_x", "freqs_y", "frequencies_x", "frequencies_y",...
llama4/modeling_llama4.py:Llama4VisionModel
[ 0.00005725110531784594, 0.04698960855603218, -0.00598558085039258, 0.035130325704813004, 0.000393328198697418, 0.019243363291025162, 0.036696646362543106, -0.012754322029650211, 0.007607841398566961, 0.034682806581258774, 0.021480964496731758, 0.004559110850095749, -0.0002429894229862839, ...
[ "BaseModelOutput", "LayerNorm", "ModelPreTrainedModel", "ModelUnfoldConvolution", "ModelVisionConfig", "ModelVisionEncoder", "ModelVisionEncoderLayer", "ModelVisionModel", "ModelVisionPixelShuffleMLP", "ModelVisionRotaryEmbedding", "None", "Parameter", "_", "__init__", "_no_split_modules...
llama4/modeling_llama4.py:Llama4ForConditionalGeneration
[ -0.00035283766919746995, 0.04311034828424454, -0.0006272669997997582, 0.019730398431420326, -0.0010549490107223392, 0.03124929964542389, 0.025889018550515175, -0.04790038615465164, -0.007356130983680487, 0.025432825088500977, 0.034442659467458725, -0.003278895514085889, -0.000399169890442863...
[ "CrossEntropyLoss", "False", "GenerationMixin", "Image", "ModelCausalLMOutputWithPast", "ModelConfig", "ModelForCausalLM", "ModelForConditionalGeneration", "ModelMultiModalProjector", "ModelPreTrainedModel", "ModelTextDecoderLayer", "ModelVisionEncoderLayer", "ModelVisionModel", "None", ...
git/modeling_git.py:GitVisionModelOutput
[ -0.000019143861209158786, 0.035622019320726395, 0.021619653329253197, 0.033381640911102295, -0.00016365265764761716, 0.037190284579992294, 0.04928832873702049, -0.017698992043733597, 0.022851862013339996, -0.004088690970093012, 0.031589340418577194, 0.04794410243630409, -0.000418320705648511...
[ "ModelOutput", "ModelVisionModelOutput", "None", "attentions", "class", "hidden_states", "image_embeds", "last_hidden_state", "r" ]
git/modeling_git.py:token_type_ids_mask_function
[ -0.00002404505357844755, -0.008736887015402317, -0.01225433498620987, -0.041982442140579224, 0.00020565724116750062, 0.03994005173444748, 0.007999357767403126, -0.06217940151691437, 0.017133375629782677, 0.007006529252976179, 0.03608220815658569, 0.02825304865837097, 0.002709002234041691, ...
[ "Model_type_ids", "Model_type_ids_at_kv_idx", "Model_type_ids_at_q_idx", "Model_type_ids_mask_function", "None", "batch_idx", "def", "head_idx", "if", "image_group_ids", "image_group_ids_at_kv_idx", "image_group_ids_at_q_idx", "inner_mask", "is", "is_image_block", "kv_idx", "q_idx", ...
git/modeling_git.py:create_causal_mask_mapping
[ -0.0001699117274256423, 0.02344781905412674, 0.012403556145727634, -0.026732778176665306, -0.0008318595355376601, 0.03715403005480766, 0.004332749173045158, -0.05890273302793503, 0.01002479251474142, 0.009968155063688755, 0.04463014751672745, 0.03760712966322899, 0.00024070827930700034, 0....
[ "False", "Model_causal_mask_mapping", "Model_masks_for_generate", "None", "and", "attention_mask", "cache_position", "config", "cumsum", "def", "device", "dim", "else", "functional", "get_text_config", "if", "image_group_ids", "input_embeds", "int", "is", "is_first_iteration"...
git/modeling_git.py:GitEmbeddings
[ -0.00020230792870279402, 0.01715003326535225, 0.006757794879376888, 0.0014055076753720641, -0.0012067490024492145, 0.032255690544843674, 0.032255690544843674, -0.012550190091133118, 0.011300849728286266, -0.01635499857366085, 0.021806664764881134, 0.03134708106517792, 0.001334522501565516, ...
[ "Dropout", "Embedding", "False", "LayerNorm", "ModelEmbeddings", "Module", "None", "__init__", "arange", "class", "config", "def", "dropout", "else", "embeddings", "eps", "expand", "forward", "hidden_dropout_prob", "hidden_size", "if", "input_ids", "input_shape", "input...
git/modeling_git.py:GitSelfAttention
[ 0.000011525665286171716, 0.03629758208990097, 0.03139852359890938, -0.003618624061346054, -0.00009829435293795541, 0.009909462183713913, 0.021489059552550316, -0.020375637337565422, 0.00439801998436451, 0.020486978814005852, 0.009241408668458462, 0.023938588798046112, -0.0011830116854980588,...
[ "Dropout", "Instantiating", "Linear", "ModelSelfAttention", "Module", "None", "Please", "The", "ValueError", "__class__", "__init__", "__name__", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_mask", "attention_probs", "attention_probs_dropout_prob"...
git/modeling_git.py:GitSelfOutput
[ -0.00012168083776487038, 0.04875698313117027, 0.03859927877783775, 0.020879726856946945, -0.0006630723946727812, 0.05620596557855606, 0.02370131015777588, -0.01952536590397358, 0.0035551965702325106, 0.017155233770608902, 0.01365646906197071, 0.003512872848659754, 0.0036962758749723434, -0...
[ "Dropout", "LayerNorm", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
git/modeling_git.py:GitAttention
[ 0.00013643610873259604, 0.031096870079636574, 0.03515298292040825, -0.006760189309716225, 0.0003573745780158788, 0.03718104213476181, 0.04349055141210556, -0.02185794524848461, 0.013238703832030296, -0.005351816304028034, 0.012844359502196312, 0.03560366481542587, 0.002563238376751542, -0....
[ "False", "ModelAttention", "ModelSelfOutput", "Model_SELF_ATTENTION_CLASSES", "Module", "None", "__init__", "_attn_implementation", "attention_mask", "attention_output", "attn_output", "cache_position", "class", "config", "def", "forward", "hidden_states", "layer_idx", "nn", "o...
git/modeling_git.py:GitIntermediate
[ -0.00025367451598867774, 0.02240910567343235, 0.04047359153628349, 0.012862369418144226, -0.0009432404185645282, 0.03635763004422188, 0.03452831506729126, -0.018864808604121208, -0.001329111517407, -0.003772961674258113, 0.023209432139992714, -0.02103712037205696, -0.0013719861162826419, 0...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
git/modeling_git.py:GitOutput
[ -0.0002591986849438399, 0.03935529664158821, 0.0507957898080349, 0.030660521239042282, -0.0012513039400801063, 0.04988054931163788, 0.03798243775963783, -0.023681821301579475, 0.002259497530758381, 0.014758236706256866, 0.011040075682103634, 0.00267421524040401, 0.0013013561256229877, 0.00...
[ "Dropout", "LayerNorm", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "layer_norm_eps", "nn", "return", "self", "su...
git/modeling_git.py:GitLayer
[ -0.000022415990315494128, 0.010189325548708439, 0.022618062794208527, 0.00009272566239815205, 0.00009578735625836998, 0.038293950259685516, 0.03179965168237686, -0.005122655536979437, 0.006074405740946531, 0.0029952137265354395, 0.00233738636597991, 0.011476987041532993, 0.002365379128605127...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "__init__", "apply_chunking_to_forward", "attention", "attention_mask", "attention_output", "cache_position", "chunk_size_feed_forward", "class", "config", "def", "fee...
git/modeling_git.py:GitEncoder
[ -0.00017288810340687633, 0.014639632776379585, 0.01475311815738678, 0.01759025640785694, -0.0008901521214284003, 0.034272629767656326, 0.011348553001880646, -0.030414121225476265, 0.00558916199952364, 0.012313179671764374, 0.00788724422454834, 0.001376011990942061, -0.0009929983643814921, ...
[ "BaseModelOutputWithPast", "DynamicCache", "False", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "None", "Setting", "True", "__init__", "all_hidden_states", "all_self_attentions", "and", "attention_mask", "attentions", "cache_position", "checkpointing", "class", "confi...
git/modeling_git.py:GitPreTrainedModel
[ -0.0002070020418614149, 0.046198610216379166, -0.00024415625375695527, 0.029893217608332634, -0.0010261639254167676, 0.02615656517446041, 0.011889347806572914, 0.0007501612417399883, 0.00023000227520242333, 0.012681971304118633, 0.009454862214624882, 0.002717565279453993, -0.0028024890925735...
[ "Embedding", "False", "LayerNorm", "Linear", "Model", "ModelConfig", "ModelEmbeddings", "ModelPreTrainedModel", "ModelVisionEmbeddings", "None", "PreTrainedModel", "True", "_init_weights", "_is_hf_initialized", "and", "arange", "base_model_prefix", "bias", "class", "class_embed...
git/modeling_git.py:GitVisionEmbeddings
[ -0.00008226509089581668, 0.020499760285019875, 0.01657904125750065, 0.026772910729050636, -0.0003518145240377635, 0.008905633352696896, 0.026772910729050636, -0.013050394132733345, 0.003066562581807375, 0.02105986326932907, 0.025428663939237595, 0.03158979490399361, -0.0001986614370252937, ...
[ "Conv2d", "Embedding", "False", "Input", "ModelVisionEmbeddings", "Module", "Parameter", "ValueError", "_", "__init__", "align_corners", "and", "arange", "batch_size", "bicubic", "cat", "class", "class_embedding", "class_embeds", "class_pos_embed", "config", "def", "dim",...
git/modeling_git.py:GitVisionMLP
[ -0.0001662031572777778, 0.03708730265498161, 0.032536715269088745, 0.034129418432712555, -0.00047283468302339315, 0.04709859937429428, 0.03162659704685211, -0.03481200709939003, 0.003341838950291276, 0, 0.049373894929885864, -0.020136358216404915, 0.0019482210045680404, -0.0018913387320935...
[ "ACT2FN", "Linear", "ModelVisionMLP", "Module", "__init__", "activation_fn", "class", "config", "def", "fc1", "fc2", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
git/modeling_git.py:eager_attention_forward
[ 0.000051929029723396525, 0.031000085175037384, 0.02477744035422802, -0.010239078663289547, 0.00008883178816176951, 0.035978201776742935, 0.05928483232855797, -0.020365018397569656, 0.022175243124365807, 0.01170988567173481, 0.02477744035422802, 0.031452640891075134, 0.0025173425674438477, ...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "softmax", "...
git/modeling_git.py:GitVisionAttention
[ -0.00009132608829531819, 0.03574362397193909, 0.03574362397193909, 0.01663539744913578, -0.0003898921422660351, 0.01449977234005928, 0.03686763718724251, -0.02573990635573864, 0.001145088579505682, 0.04518533870577812, 0.025627505034208298, 0.015848588198423386, -0.0013347658095881343, -0....
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelVisionAttention", "Module", "None", "ValueError", "__init__", "_attn_implementation", "and", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "batch_size", "be", "by", "causal_attentio...
git/modeling_git.py:GitVisionEncoderLayer
[ -0.000021377712982939556, 0.03412716090679169, 0.030310308560729027, 0.03210647404193878, 0.00018330018792767078, 0.04423059895634651, 0.029412224888801575, -0.021666258573532104, 0.008812441490590572, 0.02705475687980652, 0.021104956045746803, 0.0204313937574625, 0.0009892948437482119, -0...
[ "False", "GradientCheckpointingLayer", "LayerNorm", "ModelVisionAttention", "ModelVisionEncoderLayer", "ModelVisionMLP", "__init__", "attention_mask", "attn_weights", "causal_attention_mask", "class", "config", "def", "embed_dim", "eps", "forward", "hidden_size", "hidden_states", ...
git/modeling_git.py:GitVisionEncoder
[ -0.00007597586227348074, 0.032378893345594406, 0.018550407141447067, 0.05463938042521477, -0.00018620672926772386, 0.039799053221940994, 0.019000113010406494, -0.03552683815360069, 0.01006219070404768, 0.01663915254175663, 0.0250711552798748, 0.008881709538400173, 0.00032147011370398104, -...
[ "BaseModelOutput", "False", "ModelVisionEncoder", "ModelVisionEncoderLayer", "Module", "ModuleList", "None", "_", "__init__", "all_attentions", "attention_mask", "attentions", "can_return_tuple", "causal_attention_mask", "class", "config", "def", "else", "encoder_layer", "encod...
git/modeling_git.py:GitVisionTransformer
[ -0.00014280017057899386, 0.06047675758600235, 0.01929389126598835, 0.04806548357009888, -0.0005147858173586428, 0.018955402076244354, 0.03971607983112335, -0.02798178233206272, 0.002637395868077874, 0.0333976112306118, 0.026966314762830734, 0.02561235800385475, 0.0004319264553487301, -0.00...
[ "BaseModelOutput", "False", "LayerNorm", "ModelVisionEmbeddings", "ModelVisionEncoder", "ModelVisionTransformer", "Module", "None", "__init__", "attentions", "auto_docstring", "class", "config", "def", "else", "embed_dim", "embeddings", "encoder", "encoder_outputs", "eps", "f...
git/modeling_git.py:GitVisionModel
[ 0.000032291387469740584, 0.04825904220342636, -0.0009146317024715245, 0.03597085922956467, 0.00018327543511986732, 0.010389098897576332, 0.04781219735741615, -0.01552779320627451, 0.00583688635379076, 0.028039397671818733, 0.01899082586169243, 0.010668376460671425, -0.0010612520854920149, ...
[ "False", "ModelPreTrainedModel", "ModelVisionConfig", "ModelVisionModel", "ModelVisionTransformer", "None", "__init__", "auto_docstring", "class", "config", "def", "else", "embeddings", "forward", "get_input_embeddings", "if", "image", "input_modalities", "interpolate_pos_encodin...
git/modeling_git.py:GitProjection
[ -0.0001977721112780273, 0.03473694249987602, 0.02713114209473133, 0.05675971135497093, -0.0006562841590493917, 0.03473694249987602, 0.027585219591856003, -0.014360206201672554, 0.003746140981093049, 0.037461407482624054, 0.01680087484419346, 0.02440667524933815, -0.0007733510574325919, -0....
[ "LayerNorm", "Linear", "ModelProjection", "Module", "Sequential", "__init__", "class", "config", "def", "embeddings", "eps", "forward", "hidden_size", "layer_norm_eps", "nn", "return", "self", "super", "vision_config", "visual_projection" ]
git/modeling_git.py:GitModel
[ 0.00003327899321448058, 0.03340949863195419, -0.0017679192824289203, 0.024388933554291725, 0.0001905385433929041, 0.03519133850932121, 0.027841247618198395, -0.03764136880636215, 0.010468309745192528, 0.029400357976555824, 0.03652771934866905, 0.023275284096598625, 0.001078848377801478, 0....
[ "BaseModelOutputWithPast", "Cache", "False", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelPreTrainedModel", "ModelProjection", "ModelVisionModel", "None", "Parameter", "ParameterList", "_", "__init__", "and", "append", "arange", "attention_mask", "attentions", "auto_d...
git/modeling_git.py:GitForCausalLM
[ -0.0001303315511904657, 0.046800397336483, 0.02060113288462162, 0.014779073186218739, -0.0005073308711871505, 0.0376194603741169, 0.021160945296287537, -0.010860379785299301, 0.0028690434992313385, 0.017914028838276863, 0.029558146372437477, 0.007557480596005917, 0.003400866175070405, 0.00...
[ "CausalLMOutputWithPast", "False", "GenerationMixin", "Linear", "Model", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "attention", "attention_mask", "attentions", "auto_docstring", "cache_position", "class", "config", "contig...
lxmert/modeling_lxmert.py:GeLU
[ -0.00005705264993594028, 0.029895588755607605, 0.004763896111398935, 0.04130611941218376, -0.0001631349150557071, 0.03445979952812195, 0.04472927749156952, -0.0037369485944509506, 0.009756002575159073, -0.00013550004223361611, 0.031493064016103745, -0.034003376960754395, -0.00028169745928607...
[ "GeLU", "Module", "__init__", "class", "def", "forward", "gelu", "nn", "return", "self", "super", "x" ]
lxmert/modeling_lxmert.py:LxmertModelOutput
[ -0.000052556719310814515, 0.03490293771028519, 0.01925291121006012, 0.027359401807188988, -0.0004503605014178902, 0.04593677073717117, 0.05584470182657242, -0.00962645560503006, 0.018127011135220528, 0.007825014181435108, 0.02139212377369404, 0.03287631645798683, -0.0010414586868137121, 0,...
[ "ModelModelOutput", "ModelOutput", "None", "class", "cross_encoder_attentions", "language_attentions", "language_hidden_states", "language_output", "pooled_output", "r", "vision_attentions", "vision_hidden_states", "vision_output" ]
lxmert/modeling_lxmert.py:LxmertForQuestionAnsweringOutput
[ -0.00017108538304455578, 0.030252128839492798, 0.01907690241932869, 0.019189782440662384, -0.0007760574226267636, 0.04492666944861412, 0.06772864609956741, 0.01715792343020439, 0.017835209146142006, 0.01625487580895424, 0.010723702609539032, 0.043346334248781204, 0.00012787309242412448, -0...
[ "ModelForQuestionAnsweringOutput", "ModelOutput", "None", "class", "cross_encoder_attentions", "language_attentions", "language_hidden_states", "loss", "question_answering_score", "r", "vision_attentions", "vision_hidden_states" ]
lxmert/modeling_lxmert.py:LxmertForPreTrainingOutput
[ -0.0001871986169135198, 0.03842869773507118, 0.018762245774269104, 0.02203998900949955, -0.0007099419017322361, 0.04927915334701538, 0.05153966322541237, 0.003390767378732562, 0.015597529709339142, 0.006329432595521212, 0.014693325385451317, 0.03413372486829758, -0.00014658004511147738, 0....
[ "ModelForPreTrainingOutput", "ModelOutput", "None", "class", "cross_encoder_attentions", "cross_relationship_score", "language_attentions", "language_hidden_states", "loss", "prediction_logits", "question_answering_score", "r", "vision_attentions", "vision_hidden_states" ]
lxmert/modeling_lxmert.py:LxmertEmbeddings
[ -0.00017789094999898225, 0.02016451396048069, 0.017785554751753807, -0.0023931199684739113, -0.000856708618812263, 0.04486037790775299, 0.03194602578878403, -0.005805794149637222, 0.009629121981561184, -0.010082256980240345, 0.027981095016002655, 0.01914495974779129, -0.00017258076695725322,...
[ "Dropout", "Embedding", "LayerNorm", "ModelEmbeddings", "Module", "None", "__init__", "arange", "class", "config", "def", "device", "dropout", "dtype", "else", "embeddings", "eps", "expand", "forward", "hidden_dropout_prob", "hidden_size", "if", "input_ids", "input_shap...
lxmert/modeling_lxmert.py:LxmertAttention
[ -0.000011948266546824016, 0.02994508482515812, 0.0491635724902153, -0.010950068011879921, -0.0003055264533031732, 0.01586642488837242, 0.025140462443232536, -0.008547757752239704, 0.0007158048683777452, 0.014749071560800076, 0.015196013264358044, 0.03597879782319069, -0.0014595435932278633, ...
[ "Dropout", "False", "Linear", "ModelAttention", "Module", "None", "The", "ValueError", "_", "__init__", "a", "attention", "attention_head_size", "attention_mask", "attention_probs", "attention_probs_dropout_prob", "attention_scores", "batch_size", "class", "config", "context"...
lxmert/modeling_lxmert.py:LxmertAttentionOutput
[ -0.0001193950156448409, 0.050342559814453125, 0.06113024801015854, 0.014046472497284412, -0.0006496493588201702, 0.047420892864465714, 0.027306342497467995, -0.03348679095506668, 0.002177203306928277, 0.01752999797463417, 0.01281038299202919, 0.03326204791665077, 0.0037082687485963106, -0....
[ "Dropout", "LayerNorm", "Linear", "ModelAttentionOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "nn", "return", "self", "super" ]
lxmert/modeling_lxmert.py:LxmertCrossAttentionLayer
[ -0.00004358119622338563, 0.04236180707812309, 0.05255582556128502, -0.010363918729126453, -0.0006512844702228904, 0.044853679835796356, 0.02842998318374157, -0.019934969022870064, 0.006739378906786442, 0.0013875191798433661, 0.011213419958949089, 0.04462714493274689, 0.0019396950956434011, ...
[ "False", "ModelAttention", "ModelAttentionOutput", "ModelCrossAttentionLayer", "Module", "None", "__init__", "att", "attention_output", "attention_probs", "class", "config", "ctx_att_mask", "ctx_tensor", "def", "else", "forward", "if", "input_tensor", "nn", "output", "outpu...
lxmert/modeling_lxmert.py:LxmertSelfAttentionLayer
[ 0.00020808607223443687, 0.04850973188877106, 0.036495640873909, -0.00008633358083898202, 0.0008004672708921134, 0.04465615749359131, 0.02482156828045845, -0.000683584890794009, 0.010257314890623093, 0.005553684197366238, 0.016661053523421288, 0.01904120296239853, 0.0034285499714314938, -0....
[ "False", "ModelAttention", "ModelAttentionOutput", "ModelSelfAttentionLayer", "Module", "__init__", "attention_mask", "attention_output", "attention_probs", "class", "config", "def", "else", "forward", "if", "input_tensor", "nn", "output", "output_attentions", "outputs", "ret...
lxmert/modeling_lxmert.py:LxmertIntermediate
[ -0.0002685948566067964, 0.0166170671582222, 0.039880964905023575, 0.010657843202352524, -0.001009916653856635, 0.04194376990199089, 0.03804735466837883, -0.021774088963866234, 0.00043333301437087357, 0.00015309907030314207, 0.018679875880479813, -0.020742684602737427, -0.0019195578061044216,...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_act_fn", "intermediate_size", "nn", "return", "self", "super" ]
lxmert/modeling_lxmert.py:LxmertOutput
[ -0.00022269411419983953, 0.04059090092778206, 0.0478881411254406, 0.03283758461475372, -0.0011473200283944607, 0.05108068510890007, 0.035117968916893005, -0.02257583849132061, 0.0013468540273606777, 0.018471140414476395, 0.00974865723401308, 0.00664162915199995, 0.0016105237882584333, 0.00...
[ "Dropout", "LayerNorm", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "nn", "return", "self", "super" ]
lxmert/modeling_lxmert.py:LxmertLayer
[ 0.000020229741494404152, 0.01508610974997282, 0.026907315477728844, 0.011427165009081364, 0.00017327212844975293, 0.05539079010486603, 0.039404019713401794, -0.011258291080594063, 0.014860943891108036, 0.00021549072698689997, 0.01801326498389244, -0.010639085434377193, 0.0005558780976571143,...
[ "False", "ModelIntermediate", "ModelLayer", "ModelOutput", "ModelSelfAttentionLayer", "Module", "None", "__init__", "attention", "attention_mask", "attention_output", "class", "config", "def", "forward", "hidden_states", "intermediate", "intermediate_output", "layer_output", "n...
lxmert/modeling_lxmert.py:LxmertXLayer
[ -0.00006715262134093791, 0.0287641454488039, 0.013146113604307175, 0.02056187018752098, -0.00028792236116714776, 0.05977549031376839, 0.022471988573670387, -0.03460686281323433, 0.006797776557505131, 0.00589889707043767, 0.029887745156884193, 0.02483154833316803, 0.0025561887305229902, -0....
[ "False", "ModelCrossAttentionLayer", "ModelIntermediate", "ModelOutput", "ModelSelfAttentionLayer", "ModelXLayer", "Module", "__init__", "attention_probs", "class", "config", "cross_att", "ctx_att_mask", "def", "else", "forward", "if", "lang_att_output", "lang_attention_mask", ...