Column       Type     Length range
identifier   string   24-102
embedding    list     2.56k-2.56k
tokens       list     4-448
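Each row pairs a fully qualified identifier (source file plus class or function name) with a fixed-length embedding vector and the list of tokens extracted from that definition. Below is a minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library; the dataset path "user/code-embeddings" is a placeholder, since the actual dataset name is not stated in this preview.

```python
# Minimal sketch for inspecting rows with the schema above, assuming the data
# is published as a Hugging Face dataset. "user/code-embeddings" is a
# hypothetical path; substitute the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/code-embeddings", split="train")  # hypothetical path

row = ds[0]
print(row["identifier"])      # e.g. "luke/modeling_luke.py:EntityPredictionHeadTransform"
print(len(row["embedding"]))  # fixed-length list of floats (~2.56k values)
print(row["tokens"][:10])     # 4-448 source tokens drawn from the definition
```

The sample rows that follow are truncated: only the first values of each embedding and the first tokens of each token list are shown.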
luke/modeling_luke.py:EntityPredictionHeadTransform
[ -0.00035326482611708343, 0.039884503930807114, 0.042435258626937866, 0.03640620410442352, -0.0015942207537591457, 0.031536586582660675, 0.032232243567705154, -0.009797210805118084, -0.004289903212338686, 0.017739329487085342, 0.014608859084546566, 0.004463817924261093, 0.00012137817248003557...
[ "ACT2FN", "LayerNorm", "Linear", "ModelPredictionHeadTransform", "Model_emb_size", "Module", "__init__", "class", "config", "def", "dense", "else", "eps", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "isinstance", "layer_norm_eps", "nn", "return", "self"...
luke/modeling_luke.py:EntityPredictionHead
[ -0.0003552789567038417, 0.03716542571783066, 0.033933646976947784, 0.014427571557462215, -0.001803446444682777, 0.02089112438261509, 0.05655607953667641, -0.02758551575243473, -0.002236273605376482, 0.003462617052718997, 0.016851402819156647, 0.006838669069111347, -0.000973861082457006, 0....
[ "Linear", "ModelPredictionHead", "ModelPredictionHeadTransform", "Model_emb_size", "Model_vocab_size", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "forward", "hidden_states", "nn", "return", "self", "super", "torch", "transform", "zeros" ...
luke/modeling_luke.py:LukePreTrainedModel
[ -0.00020356893946882337, 0.04173603653907776, 0.004342803731560707, 0.02436482161283493, -0.0008178007556125522, 0.02481602318584919, 0.01065960992127657, 0.006655205972492695, -0.0013606512220576406, 0.010152009315788746, 0.005724605172872543, 0.0020163017325103283, -0.0015933014219626784, ...
[ "Embedding", "False", "LayerNorm", "Linear", "Model", "ModelAttention", "ModelConfig", "ModelEntityEmbeddings", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "_init_weights", "_is_hf_initialized", "_no_split_modules", "and", "base_model_prefix", "bias", "class", "c...
luke/modeling_luke.py:LukeModel
[ -0.00011193323734914884, 0.025157656520605087, 0.00846109539270401, 0.010604572482407093, -0.00047946206177584827, 0.056632932275533676, 0.005809952039271593, 0.007332949433475733, 0.002707550534978509, 0.0075021712109446526, 0.02606017328798771, 0.014045418240129948, 0.000757973117288202, ...
[ "BaseModelModelOutputWithPooling", "ModelEmbeddings", "ModelEncoder", "ModelEntityEmbeddings", "ModelModel", "ModelPooler", "ModelPreTrainedModel", "None", "True", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "batch_size", "cat", "class"...
luke/modeling_luke.py:create_position_ids_from_input_ids
[ 0, -0.005805705673992634, 0.010718226432800293, -0.02299952693283558, -0.0003837906406261027, 0.023557767271995544, 0.02154809981584549, -0.019091840833425522, 0.015295801684260368, -0.015072504989802837, 0.022106342017650604, 0.01244877278804779, 0.0016886788653209805, -0.0073687802068889...
[ "Model_position_ids_from_input_ids", "cumsum", "def", "dim", "incremental_indices", "input_ids", "int", "long", "mask", "ne", "padding_idx", "return", "torch", "type_as" ]
luke/modeling_luke.py:LukeLMHead
[ -0.00016041471099015325, 0.04488105699419975, 0.013015505857765675, 0.033211980015039444, -0.0006381525308825076, 0.017167003825306892, 0.05924299359321594, -0.01739140972495079, 0.002791040576994419, 0.02176731266081333, 0.014586343429982662, 0.015035153366625309, 0.001016836380586028, 0....
[ "LayerNorm", "Linear", "ModelLMHead", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "dense", "eps", "features", "forward", "gelu", "hidden_size", "kwargs", "layer_norm", "layer_norm_eps", "nn", "return", "self", "super", "torch", "v...
luke/modeling_luke.py:LukeForMaskedLM
[ -0.0002169730723835528, 0.038384828716516495, 0.01716027595102787, -0.008975276723504066, -0.0010231250198557973, 0.06548000127077103, 0.00908817257732153, -0.0007091314764693379, -0.00392315536737442, 0.00880593154579401, 0.023143794387578964, 0.03567531332373619, -0.002441388089209795, 0...
[ "CrossEntropyLoss", "EntityPredictionHead", "Model", "ModelForMaskedLM", "ModelLMHead", "ModelMaskedLMOutput", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "bias", "class", "config", "dec...
luke/modeling_luke.py:LukeForEntityClassification
[ -0.00030190724646672606, 0.04071434587240219, 0.003939170856028795, 0.002501517068594694, -0.0012004406889900565, 0.05359572172164917, 0.020357172936201096, 0.01909203827381134, -0.0027027886826545, 0.01150122843682766, 0.03910417482256889, 0.02783297188580036, 0.00022103922674432397, 0.02...
[ "Dropout", "EntityClassificationOutput", "Linear", "Model", "ModelForEntityClassification", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "attention_mask", "attentions", "auto_docstring", "binary_cross_entropy_with_logits", "class", "classifier", "config", "cros...
luke/modeling_luke.py:LukeForEntityPairClassification
[ -0.00028290378395467997, 0.023685628548264503, 0.02414112165570259, -0.010874892584979534, -0.0009323369595222175, 0.04076661169528961, 0.02049717865884304, 0.01992781274020672, 0.0004359208978712559, 0.0178780946880579, 0.03871689364314079, 0.028354430571198463, -0.00015301712846849114, 0...
[ "Dropout", "EntityPairClassificationOutput", "False", "Linear", "Model", "ModelForEntityPairClassification", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "attention_mask", "attentions", "auto_docstring", "binary_cross_entropy_with_logits", "cat", "class", "clas...
luke/modeling_luke.py:LukeForEntitySpanClassification
[ -0.00023689615773037076, 0.027720386162400246, 0.00432777451351285, -0.009504131972789764, -0.0006753308116458356, 0.05883510410785675, 0.015727076679468155, -0.0029417553450912237, 0.00022628885926678777, 0.018895119428634644, 0.04208973050117493, 0.04390003904700279, 0.00038009457057341933...
[ "Dropout", "EntitySpanClassificationOutput", "Linear", "Model", "ModelForEntitySpanClassification", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "attention_mask", "attentions", "auto_docstring", "binary_cross_entropy_with_logits", "cat", "class", "classifier", ...
luke/modeling_luke.py:LukeForSequenceClassification
[ -0.0003833608061540872, 0.029718561097979546, 0.009503028355538845, 0.011634009890258312, -0.0013102659722790122, 0.033634960651397705, 0.017278233543038368, 0.01647191494703293, -0.009272651746869087, 0.017623797059059143, 0.055059969425201416, 0.007429640274494886, -0.0006479337462224066, ...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "Dropout", "Linear", "MSELoss", "Model", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "ModelSequenceClassifierOutput", "None", "True", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "class", ...
luke/modeling_luke.py:LukeForTokenClassification
[ -0.0003104502393398434, 0.03261701390147209, 0.010451226495206356, -0.021246997639536858, -0.0011843765387311578, 0.049384914338588715, 0.036292169243097305, 0.011886834166944027, -0.001708373543806374, 0.01481547486037016, 0.05237098038196564, 0.02698943018913269, -0.0015289224684238434, ...
[ "CrossEntropyLoss", "Dropout", "False", "Linear", "Model", "ModelForTokenClassification", "ModelModel", "ModelPreTrainedModel", "ModelTokenClassifierOutput", "None", "True", "__init__", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "class", "classifier", ...
luke/modeling_luke.py:LukeForQuestionAnswering
[ -0.00023696747666690499, 0.025691518560051918, 0.013977544382214546, 0.01675041764974594, -0.000827617768663913, 0.05658924952149391, 0.03395354747772217, 0.027502374723553658, 0.0013015527511015534, 0.03576440364122391, 0.02116437815129757, 0.022748878225684166, -0.0003023988101631403, 0....
[ "CrossEntropyLoss", "False", "Linear", "Model", "ModelForQuestionAnswering", "ModelModel", "ModelPreTrainedModel", "ModelQuestionAnsweringModelOutput", "None", "True", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "clamp_", "class", "co...
luke/modeling_luke.py:LukeForMultipleChoice
[ -0.0003128162061329931, 0.04947529733181, 0.020480472594499588, 0.027959296479821205, -0.0012872206280007958, 0.051776472479104996, 0.04533318057656288, -0.00454482389613986, -0.0004638309183064848, 0.019675059244036674, 0.041651297360658646, -0.005292706191539764, -0.0016683529829606414, ...
[ "CrossEntropyLoss", "Dropout", "Linear", "Model", "ModelForMultipleChoice", "ModelModel", "ModelMultipleChoiceModelOutput", "ModelPreTrainedModel", "None", "True", "__init__", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "classifier_dropout", "config", ...
encoder_decoder/modeling_encoder_decoder.py:shift_tokens_right
[ -0.00016833323752507567, 0.03016531653702259, 0.0016727008624002337, -0.043546922504901886, -0.000864701287355274, 0.05012432113289833, 0.04127885401248932, -0.058062564581632614, 0.007427925709635019, 0.005415014456957579, 0.020299216732382774, -0.008902170695364475, -0.0007264908053912222,...
[ "Model_tokens_right", "Modeled_input_ids", "None", "clone", "decoder_start_token_id", "def", "if", "input_ids", "is", "masked_fill_", "new_zeros", "pad_token_id", "return", "shape" ]
encoder_decoder/modeling_encoder_decoder.py:EncoderDecoderModel
[ -0.00006051892341929488, 0.05074467882514, 0.00449067959561944, -0.011114431545138359, -0.0005157264531590044, 0.0370481051504612, 0.011114431545138359, -0.023688334971666336, -0.0016980381915345788, -0.007858688943088055, 0.005697549786418676, -0.009093626402318478, -0.00224533979780972, ...
[ "AutoConfig", "AutoModel", "AutoModelForCausalLM", "BaseModelOutput", "Config", "Cross", "CrossEntropyLoss", "Either", "False", "FutureWarning", "GenerationMixin", "GitHub", "Got", "Head", "If", "Initializing", "LM", "Linear", "Model", "ModelConfig", "ModelModel", "Model_",...
nystromformer/modeling_nystromformer.py:NystromformerEmbeddings
[ -0.00024067205958999693, 0.015973493456840515, 0.011409638449549675, -0.00889951828867197, -0.001269322237931192, 0.03742361441254616, 0.033772531896829605, -0.004506807308644056, 0.005961535964161158, -0.012322410009801388, 0.02224879525601864, 0.02281927689909935, -0.0014190737856552005, ...
[ "Dropout", "Embedding", "False", "LayerNorm", "ModelEmbeddings", "Module", "None", "__init__", "arange", "buffered_token_type_ids", "buffered_token_type_ids_expanded", "class", "config", "def", "device", "dropout", "dtype", "else", "embeddings", "eps", "expand", "forward", ...
nystromformer/modeling_nystromformer.py:NystromformerSelfAttention
[ -0.00005025420250603929, 0.05123459920287132, 0.025730151683092117, -0.01760484091937542, -0.00010756162373581901, 0.02708437107503414, 0.01388073991984129, -0.004119081422686577, 0.010325916111469269, 0.011115876957774162, 0.014670700766146183, 0.007956033572554588, 0.0037523137871176004, ...
[ "Conv2d", "Dropout", "False", "Linear", "ModelSelfAttention", "Module", "None", "The", "ValueError", "_", "__init__", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_mask", "attention_probs", "attention_probs_dropout_prob", "attention_scores", "bat...
nystromformer/modeling_nystromformer.py:NystromformerSelfOutput
[ -0.00012168083776487038, 0.04875698313117027, 0.03859927877783775, 0.020879726856946945, -0.0006630723946727812, 0.05620596557855606, 0.02370131015777588, -0.01952536590397358, 0.0035551965702325106, 0.017155233770608902, 0.01365646906197071, 0.003512872848659754, 0.0036962758749723434, -0...
[ "Dropout", "LayerNorm", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
nystromformer/modeling_nystromformer.py:NystromformerAttention
[ 0.00018736459605861455, 0.03642650693655014, 0.04434531182050705, 0.0012938763247802854, 0.0007105714175850153, 0.03914152458310127, 0.037784017622470856, -0.02386954240500927, 0.016063863411545753, -0.0051755052991211414, 0.019910139963030815, 0.02466142363846302, 0.003846277017146349, -0...
[ "False", "ModelAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "__init__", "attention_mask", "attention_output", "class", "config", "def", "forward", "hidden_states", "nn", "output", "output_attentions", "outputs", "return", "self", "self_outputs", "s...
nystromformer/modeling_nystromformer.py:NystromformerIntermediate
[ -0.00025367451598867774, 0.02240910567343235, 0.04047359153628349, 0.012862369418144226, -0.0009432404185645282, 0.03635763004422188, 0.03452831506729126, -0.018864808604121208, -0.001329111517407, -0.003772961674258113, 0.023209432139992714, -0.02103712037205696, -0.0013719861162826419, 0...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
nystromformer/modeling_nystromformer.py:NystromformerOutput
[ -0.0002591986849438399, 0.03935529664158821, 0.0507957898080349, 0.030660521239042282, -0.0012513039400801063, 0.04988054931163788, 0.03798243775963783, -0.023681821301579475, 0.002259497530758381, 0.014758236706256866, 0.011040075682103634, 0.00267421524040401, 0.0013013561256229877, 0.00...
[ "Dropout", "LayerNorm", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "layer_norm_eps", "nn", "return", "self", "su...
nystromformer/modeling_nystromformer.py:NystromformerLayer
[ -0.000056466607929905877, 0.016024259850382805, 0.025212997570633888, 0.008068159222602844, 0.00009629964188206941, 0.040116678923368454, 0.025437112897634506, -0.004426281899213791, 0.005911047104746103, 0.0042301807552576065, 0.009020649828016758, 0.006723465863615274, 0.002983537968248129...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "__init__", "add_cross_attention", "apply_chunking_to_forward", "attention", "attention_mask", "attention_output", "chunk_size_feed_forward", "class", "config", "def", ...
nystromformer/modeling_nystromformer.py:NystromformerEncoder
[ -0.00008237543806899339, 0.010880568064749241, 0.01570391282439232, 0.022770673036575317, -0.0003032117092516273, 0.03252953290939331, 0.007459358777850866, -0.034772951155900955, 0.003084697062149644, 0.009366262704133987, 0.016376936808228493, 0.0014091457705944777, -0.0011006760178133845,...
[ "BaseModelOutputWithPastAndCrossAttentions", "False", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "None", "True", "_", "__init__", "all_hidden_states", "all_self_attentions", "attention_mask", "attentions", "class", "config", "def", "else", "enumerate", "for", "forw...
nystromformer/modeling_nystromformer.py:NystromformerPredictionHeadTransform
[ -0.0002919524849858135, 0.041955187916755676, 0.0442478209733963, 0.03209686279296875, -0.0013899088371545076, 0.026136018335819244, 0.029574967920780182, -0.011348534375429153, -0.0021780014503747225, 0.020519066601991653, 0.01662158966064453, 0.00664863595739007, 0.0001226916938321665, 0...
[ "ACT2FN", "LayerNorm", "Linear", "ModelPredictionHeadTransform", "Module", "__init__", "class", "config", "def", "dense", "else", "eps", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "isinstance", "layer_norm_eps", "nn", "return", "self", "str", "super"...
nystromformer/modeling_nystromformer.py:NystromformerLMPredictionHead
[ -0.00033743318635970354, 0.03515048697590828, 0.03377203643321991, 0.018723953515291214, -0.0015794745413586497, 0.024926980957388878, 0.056746214628219604, -0.029292073100805283, 0.00021807517623528838, 0.004077916033565998, 0.019528048112988472, 0.008155832067131996, -0.0002458954695612192...
[ "Linear", "ModelLMPredictionHead", "ModelPredictionHeadTransform", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "forward", "hidden_size", "hidden_states", "nn", "return", "self", "super", "torch", "transform", "vocab_size", "zeros" ]
nystromformer/modeling_nystromformer.py:NystromformerOnlyMLMHead
[ -0.00036667720996774733, 0.01974974200129509, 0.018704168498516083, 0.03159958869218826, -0.0016627541044726968, 0.03694363683462143, 0.04042888432741165, -0.006534841377288103, -0.003775686025619507, -0.006215360015630722, 0.01579979434609413, 0.011210883036255836, -0.002410630462691188, ...
[ "ModelLMPredictionHead", "ModelOnlyMLMHead", "Module", "__init__", "class", "config", "def", "forward", "nn", "prediction_scores", "predictions", "return", "self", "sequence_output", "super" ]
nystromformer/modeling_nystromformer.py:NystromformerPreTrainedModel
[ -0.000310789851937443, 0.03311259299516678, -0.008565583266317844, -0.004656458273530006, -0.0013725054450333118, 0.030813105404376984, 0.029318440705537796, -0.001509037334471941, -0.0016096398467198014, 0.0035929461009800434, 0.005202585831284523, -0.003176164347678423, -0.0018827037420123...
[ "Model", "ModelConfig", "ModelEmbeddings", "ModelPreTrainedModel", "PreTrainedModel", "True", "_init_weights", "arange", "base_model_prefix", "class", "config", "copy_", "def", "expand", "if", "init", "isinstance", "module", "position_ids", "self", "shape", "super", "supp...
nystromformer/modeling_nystromformer.py:NystromformerModel
[ -0.000022345802790368907, 0.03836117312312126, 0.006141152698546648, 0.0026779910549521446, -0.00038031680742278695, 0.03656649589538574, 0.0185075830668211, -0.00869295559823513, 0.005804650951176882, 0.004402561578899622, 0.016712907701730728, 0.021536096930503845, -0.00015685877588111907,...
[ "BaseModelOutputWithPastAndCrossAttentions", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "batch_size", "buffered_token_type_ids", "buffered_token_type_ids_expanded", "class", "conf...
nystromformer/modeling_nystromformer.py:NystromformerForMaskedLM
[ -0.0000717090952093713, 0.036492541432380676, 0.015576085075736046, -0.011793321929872036, -0.00039287895197048783, 0.04027530550956726, 0.014463507570326328, -0.010736373253166676, 0.004311237949877977, -0.006230433937162161, 0.024142932146787643, 0.0382726676762104, -0.0007788042421452701,...
[ "CrossEntropyLoss", "MaskedLMOutput", "Model", "ModelForMaskedLM", "ModelModel", "ModelOnlyMLMHead", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "bias", "class", "cls", "config", "decoder", "def", "else", ...
nystromformer/modeling_nystromformer.py:NystromformerClassificationHead
[ -0.00036297834594734013, 0.023115038871765137, 0.031898751854896545, 0.01999450847506523, -0.0014085726579651237, 0.01652725227177143, 0.052471134811639786, 0.0021670348942279816, -0.006645573303103447, 0.00733902445062995, 0.02334618754684925, -0.004131813067942858, -0.0007765208138152957, ...
[ "ACT2FN", "Dropout", "Linear", "ModelClassificationHead", "Module", "__init__", "class", "config", "def", "dense", "dropout", "features", "forward", "hidden_act", "hidden_dropout_prob", "hidden_size", "kwargs", "nn", "num_labels", "out_proj", "return", "self", "super", ...
nystromformer/modeling_nystromformer.py:NystromformerForSequenceClassification
[ -0.00035359952016733587, 0.02811652049422264, -0.0028430831152945757, 0.013029607012867928, -0.0011429479345679283, 0.02811652049422264, 0.02045876905322075, 0.011258037760853767, -0.004971823655068874, 0.011829511262476444, 0.05669021978974342, 0.0021287405397742987, -0.0001812644040910527,...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "MSELoss", "Model", "ModelClassificationHead", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "None", "SequenceClassifierOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "class", "classifie...
nystromformer/modeling_nystromformer.py:NystromformerForMultipleChoice
[ -0.00020023567776661366, 0.054889384657144547, 0.016557540744543076, 0.037197764962911606, -0.0007017108728177845, 0.03991955146193504, 0.03991955146193504, -0.003090362995862961, 0, 0.013325418345630169, 0.03991955146193504, -0.00722974818199873, -0.0008611906087026, -0.012248043902218342...
[ "CrossEntropyLoss", "Linear", "Model", "ModelForMultipleChoice", "ModelModel", "ModelPreTrainedModel", "MultipleChoiceModelOutput", "None", "ReLU", "__init__", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "config", "def", "else", "forward", "hidden_s...
nystromformer/modeling_nystromformer.py:NystromformerForTokenClassification
[ -0.00026293317205272615, 0.036839067935943604, 0.00022384850308299065, -0.030926624312996864, -0.0009025002946145833, 0.03752127289772034, 0.04025009274482727, 0.015804415568709373, -0.002544055925682187, 0.009152916260063648, 0.057532619684934616, 0.020920952782034874, -0.000980669632554054...
[ "CrossEntropyLoss", "Dropout", "Linear", "Model", "ModelForTokenClassification", "ModelModel", "ModelPreTrainedModel", "None", "TokenClassifierOutput", "__init__", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "config", "def", "dropout", "else", "forw...
nystromformer/modeling_nystromformer.py:NystromformerForQuestionAnswering
[ -0.00017493921041022986, 0.0273432619869709, 0.01721612736582756, 0.012546394020318985, -0.0006540440954267979, 0.047710053622722626, 0.039833392947912216, 0.02362998016178608, 0.003966460935771465, 0.029256165027618408, 0.027455786243081093, 0.01710360497236252, 0.001448742812499404, 0.00...
[ "CrossEntropyLoss", "Linear", "Model", "ModelForQuestionAnswering", "ModelModel", "ModelPreTrainedModel", "None", "QuestionAnsweringModelOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "clamp", "class", "config", "def", "dim", "else", "end_logits",...
informer/modeling_informer.py:InformerFeatureEmbedder
[ -0.00017565416055731475, 0.015252782963216305, 0.014348914846777916, 0.01920720748603344, -0.0003001126169692725, 0.016382619738578796, 0.008869211189448833, -0.02383953519165516, 0.011524325236678123, -0.0036437204107642174, 0.002132565015926957, -0.014800848439335823, 0.0031494172289967537...
[ "Embedding", "ModelFeatureEmbedder", "Module", "ModuleList", "__init__", "c", "cardinalities", "cat", "cat_feature_slice", "cat_feature_slices", "chunk", "class", "d", "def", "dim", "else", "embed", "embedders", "embedding_dims", "features", "for", "forward", "if", "in"...
informer/modeling_informer.py:InformerStdScaler
[ -0.0002436667709844187, 0.061923377215862274, -0.014057972468435764, 0.022993605583906174, -0.0008928519673645496, 0.037108492106199265, 0.008366485126316547, -0.0015224727103486657, 0.008594145067036152, 0.02766062505543232, 0.028457432985305786, 0.02811594493687153, 0.0031445464119315147, ...
[ "ModelStdScaler", "Module", "True", "__init__", "clamp_min", "class", "config", "data", "def", "denominator", "dim", "else", "forward", "hasattr", "if", "keepdim", "loc", "minimum_scale", "nn", "observed_indicator", "return", "scale", "scaling_dim", "self", "sqrt", ...
informer/modeling_informer.py:InformerMeanScaler
[ -0.0002414414193481207, 0.05749146267771721, -0.0013563326792791486, 0.029995545744895935, -0.001086486387066543, 0.03158621862530708, 0.002783677540719509, 0.005822998937219381, 0.002556438557803631, 0.030222784727811813, 0.01590672880411148, 0.007072813343256712, 0.002954106777906418, 0....
[ "ModelMeanScaler", "Module", "None", "True", "__init__", "abs", "batch_observations", "batch_sum", "clamp", "class", "config", "data", "def", "default_scale", "dim", "else", "forward", "hasattr", "if", "is", "keepdim", "min", "minimum_scale", "nn", "not", "num_obser...
informer/modeling_informer.py:InformerNOPScaler
[ -0.00023304640490096062, 0.0414431206882, -0.010189997963607311, 0.04121541231870651, -0.0010531562147662044, 0.033473290503025055, 0.017192063853144646, -0.008823741227388382, 0.010588490404188633, 0.03324558213353157, 0.003586424048990011, -0.0070874569937586784, 0.0005479258834384382, 0...
[ "False", "ModelNOPScaler", "Module", "None", "True", "__init__", "class", "config", "data", "def", "dim", "else", "forward", "hasattr", "if", "keepdim", "loc", "mean", "nn", "observed_indicator", "ones_like", "requires_grad", "return", "scale", "scaling_dim", "self"...
informer/modeling_informer.py:InformerSinusoidalPositionalEmbedding
[ -0.00016163897817023098, 0.040929801762104034, 0.012087784707546234, 0.006043892353773117, -0.0008890143944881856, -0.002825168427079916, 0.04025513306260109, -0.009838894940912724, 0.010457339696586132, -0.011300673708319664, 0.019115567207336426, 0.015067564323544502, 0.0025862236507236958...
[ "Embedding", "False", "FloatTensor", "ModelSinusoidalPositionalEmbedding", "None", "True", "__init__", "_freeze", "arange", "array", "class", "cos", "create_weight", "def", "device", "dim", "dtype", "else", "embedding_dim", "empty", "for", "forward", "if", "in", "inpu...
informer/modeling_informer.py:InformerValueEmbedding
[ -0.0001237391261383891, 0.02477611042559147, 0.0004772795073222369, 0.02398417890071869, -0.0004808149242307991, 0.040954116731882095, 0.042537979781627655, -0.02353164739906788, 0.0033939876593649387, 0.0266993697732687, -0.0042990511283278465, -0.03009335696697235, 0.0014000198571011424, ...
[ "Linear", "ModelValueEmbedding", "Module", "__init__", "class", "d_model", "def", "feature_size", "forward", "in_features", "nn", "out_features", "return", "self", "super", "value_projection", "x" ]
informer/modeling_informer.py:InformerPreTrainedModel
[ -0.0003047135251108557, 0.0451979786157608, 0.0002509405603632331, 0.009062538854777813, -0.0014554552035406232, 0.021910695359110832, 0.0302849393337965, 0.0023086529690772295, -0.003541846526786685, 0.011414210312068462, -0.0003208454290870577, -0.013765881769359112, -0.0030399656388908625...
[ "ModelConfig", "ModelPreTrainedModel", "ModelSinusoidalPositionalEmbedding", "PreTrainedModel", "True", "_init_weights", "base_model_prefix", "class", "config", "copy_", "create_weight", "def", "if", "init", "input_modalities", "isinstance", "main_input_name", "model", "module", ...
informer/modeling_informer.py:eager_attention_forward
[ 0.00002095530362566933, 0.025862814858555794, 0.0269921962171793, -0.01146321278065443, 0.00008955634984886274, 0.03704368323087692, 0.060083046555519104, -0.023942869156599045, 0.021458230912685394, 0.014569009654223919, 0.023152301087975502, 0.026879258453845978, 0.003063444746658206, -0...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "shape", "size", "softmax", "tor...
informer/modeling_informer.py:InformerAttention
[ -0.0001932329178089276, 0.04201360046863556, 0.04201360046863556, -0.006776387337595224, -0.0007764610345475376, 0.02010328322649002, 0.029138466343283653, -0.028234947472810745, 0.0020611512009054422, 0.014908052049577236, 0.008188134990632534, 0.009261062368750572, -0.0016235094517469406, ...
[ "ALL_ATTENTION_FUNCTIONS", "EncoderDecoderCache", "False", "Instantiating", "Linear", "ModelAttention", "Module", "None", "Please", "True", "ValueError", "__class__", "__init__", "__name__", "_attn_implementation", "a", "and", "attention_interface", "attention_mask", "attn_outp...
informer/modeling_informer.py:InformerProbSparseAttention
[ -0.00026433716993778944, 0.04768713191151619, 0.046097561717033386, -0.009423885494470596, -0.001128311618231237, 0.011297308839857578, 0.030656013637781143, -0.03951219469308853, -0.0062731290236115456, 0.012319176457822323, 0.025206055492162704, 0.024070648476481438, -0.0017102082492783666...
[ "Attention", "EncoderDecoderCache", "False", "Linear", "ModelProbSparseAttention", "Module", "None", "True", "ValueError", "_", "__init__", "_shape", "and", "arange", "astype", "attention_mask", "attn_output", "attn_probs", "attn_weights", "attn_weights_reshaped", "be", "bm...
informer/modeling_informer.py:InformerConvLayer
[ -0.000010981776540575083, 0.005085831508040428, 0.01830899342894554, 0.014579384587705135, 0.00028254621429368854, 0.0427209846675396, 0.011697412468492985, -0.007911293767392635, 0.020343326032161713, 0.0074027106165885925, 0.021925585344433784, -0.016387680545449257, 0.0018930595833808184,...
[ "BatchNorm1d", "Conv1d", "ELU", "GradientCheckpointingLayer", "MaxPool1d", "ModelConvLayer", "__init__", "activation", "c_in", "circular", "class", "def", "downConv", "forward", "in_channels", "kernel_size", "maxPool", "nn", "norm", "out_channels", "padding", "padding_mode"...
informer/modeling_informer.py:InformerEncoderLayer
[ -0.0001988690928556025, 0.05068320780992508, 0.03204633295536041, 0.013409458100795746, -0.0009020133875310421, 0.030000822618603706, 0.022159697487950325, -0.013011720031499863, 0.0004723140737041831, 0.015454969368875027, 0.013125360012054443, 0.025568882003426552, 0.0016690798802301288, ...
[ "ACT2FN", "False", "GradientCheckpointingLayer", "LayerNorm", "Linear", "ModelAttention", "ModelEncoderLayer", "ModelProbSparseAttention", "__init__", "activation_dropout", "activation_fn", "activation_function", "and", "any", "attention_dropout", "attention_mask", "attention_type", ...
informer/modeling_informer.py:InformerDecoderLayer
[ -0.00018462177831679583, 0.05924857407808304, 0.026232192292809486, -0.007293001748621464, -0.0006890190998092294, 0.023970797657966614, 0.040478985756635666, -0.03188568353652954, -0.0011660322779789567, -0.008197559975087643, 0.0010529624996706843, 0.024875355884432793, -0.0008162226295098...
[ "ACT2FN", "False", "GradientCheckpointingLayer", "LayerNorm", "Linear", "ModelAttention", "ModelDecoderLayer", "ModelProbSparseAttention", "None", "True", "__init__", "activation_dropout", "activation_fn", "activation_function", "attention_dropout", "attention_mask", "attention_type"...
informer/modeling_informer.py:InformerEncoder
[ -0.00015476417320314795, 0.03463887423276901, 0.013980069197714329, 0.012225485406816006, -0.0007110308506526053, 0.04550597444176674, 0.015621453523635864, -0.021055003628134727, 0.0033535186666995287, -0.012225485406816006, 0.012565082870423794, 0.005207151174545288, 0.002730924403294921, ...
[ "BaseModelOutput", "False", "LayerNorm", "ModelConvLayer", "ModelEncoder", "ModelEncoderLayer", "ModelPreTrainedModel", "ModelSinusoidalPositionalEmbedding", "ModelValueEmbedding", "ModuleList", "None", "True", "_", "__init__", "_prepare_4d_attention_mask", "all_attentions", "append"...
informer/modeling_informer.py:InformerDecoder
[ -0.00021545833442360163, 0.05789240822196007, 0.009914644993841648, -0.011111239902675152, -0.0012108401861041784, 0.02472963184118271, 0.05265018343925476, -0.04444495961070061, 0.007749377284198999, -0.017664022743701935, -0.004131102003157139, -0.0027065840549767017, -0.001410272787325084...
[ "BaseModelOutputWithPastAndCrossAttentions", "DynamicCache", "EncoderDecoderCache", "False", "LayerNorm", "ModelDecoder", "ModelDecoderLayer", "ModelPreTrainedModel", "ModelSinusoidalPositionalEmbedding", "ModelValueEmbedding", "ModuleList", "None", "Setting", "True", "__init__", "all_...
informer/modeling_informer.py:InformerModel
[ -0.00041507865535095334, 0.03635215014219284, -0.007806391455233097, 0.008447214029729366, -0.0014782624784857035, 0.042876895517110825, 0.021438447758555412, -0.03332280367612839, 0.0019079053308814764, -0.006058691535145044, 0.012816463597118855, 0.004252735525369644, 0.0012379538966342807...
[ "BaseModelOutput", "ModelDecoder", "ModelEncoder", "ModelFeatureEmbedder", "ModelMeanScaler", "ModelModel", "ModelNOPScaler", "ModelPreTrainedModel", "ModelStdScaler", "None", "Seq2SeqTSModelOutput", "True", "ValueError", "_", "__init__", "_past_length", "abs", "and", "append", ...
informer/modeling_informer.py:weighted_average
[ -0.00007938854105304927, 0.033500220626592636, 0.03528689965605736, 0.005471702665090561, -0.0003873462846968323, 0.03484022989869118, 0.004159610718488693, -0.008933392353355885, 0.004103776998817921, -0.0012492790119722486, 0.01133424136787653, -0.015521768480539322, -0.0021775143686681986...
[ "Model_average", "Model_tensor", "None", "clamp", "def", "dim", "else", "if", "input_tensor", "is", "min", "not", "return", "sum", "sum_weights", "torch", "weights", "where", "zeros_like" ]
informer/modeling_informer.py:nll
[ 0.00003988773823948577, 0.028024237602949142, 0.02416665107011795, 0.009019947610795498, -0.00017018768994603306, 0.06262906640768051, 0.04243346303701401, 0.025414694100618362, 0.01667839288711548, -0.020082145929336548, 0.025301234796643257, -0.028931906446814537, 0.001205496140755713, 0...
[ "Model", "def", "input", "log_prob", "return", "target" ]
informer/modeling_informer.py:InformerForPrediction
[ -0.0005633760592900217, 0.031488966196775436, -0.005468503572046757, 0.022354761138558388, -0.002298574196174741, 0.02680167742073536, 0.050478495657444, -0.022474948316812515, -0.011598034761846066, 0.0027793217450380325, 0.01670597866177559, -0.012559530325233936, -0.001727686496451497, ...
[ "False", "ModelForPrediction", "ModelModel", "ModelPreTrainedModel", "NegativeBinomialOutput", "None", "NormalOutput", "SampleTSPredictionOutput", "Seq2SeqTSPredictionOutput", "StudentTOutput", "True", "_", "__init__", "append", "auto_docstring", "cache_position", "cat", "class", ...
sew/modeling_sew.py:SEWNoLayerNormConvLayer
[ -0.00008776657341513783, 0.017161672934889793, 0.009371176362037659, -0.006294495426118374, 0, 0.03612983971834183, 0.032742664217948914, -0.03025873936712742, 0.012871255166828632, -0.016371332108974457, 0.03206523135304451, -0.006774344481527805, 0.002512152772396803, -0.0072259674780070...
[ "ACT2FN", "Conv1d", "GradientCheckpointingLayer", "Model", "__init__", "activation", "class", "config", "conv", "conv_dim", "conv_kernel", "conv_stride", "def", "else", "feat_extract_activation", "forward", "hidden_states", "if", "in_conv_dim", "kernel_size", "layer_id", "n...
sew/modeling_sew.py:SEWLayerNormConvLayer
[ -0.00008998542034532875, 0.02055196277797222, 0.010784134268760681, 0.0013056707102805376, -0.0002876004436984658, 0.036135319620370865, 0.031166713684797287, -0.02653687633574009, 0.012647362425923347, -0.010897057130932808, 0.02935994789004326, 0.0026678028516471386, 0.0018773428164422512,...
[ "ACT2FN", "Conv1d", "GradientCheckpointingLayer", "LayerNorm", "Model", "True", "__init__", "activation", "class", "config", "conv", "conv_dim", "conv_kernel", "conv_stride", "def", "elementwise_affine", "else", "feat_extract_activation", "forward", "hidden_states", "if", "...
sew/modeling_sew.py:SEWGroupNormConvLayer
[ -0.00006655586912529543, 0.016699794679880142, 0.0028350157663226128, 0.0013751941733062267, 0.00009432421938981861, 0.03791304677724838, 0.027757765725255013, -0.02764492854475975, 0.013822464272379875, -0.011057971976697445, 0.03227122500538826, -0.0006593879661522806, 0.00207336968742311,...
[ "ACT2FN", "Conv1d", "GradientCheckpointingLayer", "GroupNorm", "Model", "True", "__init__", "activation", "affine", "class", "config", "conv", "conv_dim", "conv_kernel", "conv_stride", "def", "else", "feat_extract_activation", "forward", "hidden_states", "if", "in_conv_dim"...
sew/modeling_sew.py:SEWPositionalConvEmbedding
[ -0.00014321714115794748, 0.03326881304383278, -0.007525088265538216, -0.004639528226107359, -0.0005127527401782572, 0.027271373197436333, 0.03055299073457718, -0.04300050437450409, 0.012504094280302525, -0.010071170516312122, 0.028629284352064133, 0, 0.00483755674213171, -0.018444953486323...
[ "ACT2FN", "Conv1d", "GatheredParameters", "Model", "ModelSamePadLayer", "Module", "__init__", "activation", "class", "config", "conv", "deepspeed", "def", "dim", "else", "feat_extract_activation", "forward", "groups", "hasattr", "hidden_size", "hidden_states", "if", "is_d...
sew/modeling_sew.py:SEWSamePadLayer
[ -0.00012616814638022333, 0.04065614193677902, 0.03229904547333717, 0.0005681978072971106, -0.0007834777352400124, 0.013439113274216652, 0.019989268854260445, -0.04043027386069298, 0.018408196046948433, -0.011688640341162682, 0.005618452560156584, 0.004997317213565111, 0.003980913665145636, ...
[ "Model", "Module", "__init__", "class", "def", "else", "forward", "hidden_states", "if", "nn", "num_conv_pos_embeddings", "num_pad_remove", "return", "self", "super" ]
sew/modeling_sew.py:SEWUpsampling
[ -0.00027743421378545463, 0.01718302257359028, 0.02829471044242382, 0.010653474368155003, -0.0008877894724719226, 0.01821400411427021, 0.02336890995502472, -0.053840138018131256, 0.0011598540004342794, 0.011741732247173786, 0.004009372089058161, -0.014433738775551319, 0.0012672479497268796, ...
[ "ACT2FN", "Linear", "Model", "Module", "__init__", "activation", "bsz", "class", "config", "def", "feat_extract_activation", "forward", "hidden_size", "hidden_states", "if", "nn", "projection", "reshape", "return", "self", "size", "squeeze_factor", "src_embed_dim", "src...
sew/modeling_sew.py:SEWFeatureEncoder
[ -0.00004578685911837965, 0.03922173008322716, -0.021301455795764923, 0.03381183370947838, 0.00029233150416985154, 0.021864986047148705, -0.003662948729470372, -0.019385451450943947, 0.007551310118287802, 0.001613106345757842, 0.030205240473151207, -0.0011270612012594938, 0.002507711062207818...
[ "False", "Model", "ModelGroupNormConvLayer", "ModelLayerNormConvLayer", "ModelNoLayerNormConvLayer", "Module", "ModuleList", "None", "True", "ValueError", "__init__", "_freeze_parameters", "_requires_grad", "and", "be", "but", "class", "config", "conv_layer", "conv_layers", "...
sew/modeling_sew.py:eager_attention_forward
[ 0.00002095530362566933, 0.025862814858555794, 0.0269921962171793, -0.01146321278065443, 0.00008955634984886274, 0.03704368323087692, 0.060083046555519104, -0.023942869156599045, 0.021458230912685394, 0.014569009654223919, 0.023152301087975502, 0.026879258453845978, 0.003063444746658206, -0...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "shape", "size", "softmax", "tor...
sew/modeling_sew.py:SEWAttention
[ -0.00028685652068816125, 0.05163417384028435, 0.04130734130740166, -0.005823187530040741, -0.0010828833328559995, 0.02570234425365925, 0.035570207983255386, -0.02386646345257759, -0.004876560997217894, 0.03396381437778473, 0.008089354261755943, -0.010326835326850414, -0.003671763464808464, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "Model", "Module", "None", "ValueError", "__init__", "_attn_implementation", "and", "attention_interface", "attention_mask", "attn_output", "attn_weights", "be", "bsz", "by", "class", "config", "contiguous", "current_states", "d...
sew/modeling_sew.py:SEWFeedForward
[ -0.0002615255652926862, 0.040124472230672836, 0.043105147778987885, 0.0011249182280153036, -0.0011177530977874994, 0.028201771900057793, 0.038519490510225296, -0.02212578058242798, -0.003295938717201352, -0.02086472511291504, 0.03164101019501686, -0.02132328972220421, -0.0008598100976087153,...
[ "ACT2FN", "Dropout", "Linear", "Model", "Module", "__init__", "activation_dropout", "class", "config", "def", "else", "forward", "hidden_act", "hidden_dropout", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_dense", "intermediate_dropout", "interme...
sew/modeling_sew.py:SEWEncoderLayer
[ -0.00005358808266464621, 0.04546719044446945, 0.03829994797706604, 0.003807597327977419, -0.00018285565602127463, 0.033596448600292206, 0.035612232983112335, -0.003499629907310009, 0.006859274581074715, 0.0025057350285351276, 0.010694868862628937, 0.009966946206986904, 0.0022677602246403694,...
[ "Dropout", "False", "GradientCheckpointingLayer", "LayerNorm", "Model", "ModelAttention", "ModelFeedForward", "None", "_", "__init__", "attention", "attention_dropout", "attention_mask", "attn_residual", "attn_weights", "class", "config", "def", "dropout", "embed_dim", "eps",...
sew/modeling_sew.py:SEWEncoder
[ -0.00016656603838782758, 0.02506287209689617, 0.019165726378560066, 0.015536712482571602, -0.0008221982861869037, 0.030619798228144646, 0.020299792289733887, -0.02279473841190338, 0.005443519912660122, -0.020072979852557182, 0.04264090582728386, 0.009185939095914364, 0.002310660667717457, ...
[ "AvgPool1d", "BaseModelOutput", "Dropout", "False", "LayerNorm", "Model", "ModelLayer", "ModelPositionalConvEmbedding", "ModelUpsampling", "Module", "ModuleList", "None", "True", "_", "__init__", "all_hidden_states", "all_self_attentions", "and", "arange", "attention_ids", "a...
sew/modeling_sew.py:SEWPreTrainedModel
[ -0.00014421141531784087, 0.04547232389450073, -0.0016742594307288527, -0.013056409545242786, -0.0006718141376040876, 0.02239849604666233, 0.019584614783525467, -0.04187055304646492, 0.003629906801506877, -0.02183571830391884, 0.03466701880097389, 0.030840139836072922, 0.0032641023863106966, ...
[ "Conv1d", "False", "GatheredParameters", "GroupNorm", "LayerNorm", "Linear", "Model", "ModelConfig", "ModelPositionalConvEmbedding", "None", "PreTrainedModel", "True", "_conv_out_length", "_get_feat_extract_output_lengths", "_get_feature_vector_attention_mask", "_init_weights", "_sup...
sew/modeling_sew.py:_compute_mask_indices
[ 0, -0.008906038478016853, 0.00551726296544075, -0.014171244576573372, 0, 0.024309566244482994, -0.0006616514874622226, -0.05265205353498459, 0.013331051915884018, -0.0200525913387537, 0.040777336806058884, 0.030695026740431786, -0.006189417093992233, -0.012434846721589565, 0.005489256698...
[ "False", "None", "ValueError", "_", "_compute_mask_indices", "and", "append", "arange", "array", "attention_mask", "batch_size", "be", "bigger", "bool", "broadcast_to", "but", "choice", "compute_num_masked_span", "concatenate", "def", "detach", "dtype", "dummy_mask_idx", ...
sew/modeling_sew.py:SEWModel
[ -0.0000999118565232493, 0.026923615485429764, 0.010152447037398815, 0.005496904719620943, -0.00028571285656653345, 0.019295258447527885, 0.02389470860362053, -0.034327611327171326, 0.008469720371067524, -0.026250524446368217, 0.025801798328757286, 0.02030489407479763, -0.0011288286186754704,...
[ "BaseModelOutput", "Dropout", "LayerNorm", "Linear", "Model", "ModelEncoder", "ModelFeatureEncoder", "ModelPreTrainedModel", "None", "Parameter", "Tensor", "True", "__init__", "_compute_mask_indices", "_get_feature_vector_attention_mask", "_mask_hidden_states", "and", "apply_spec_a...
sew/modeling_sew.py:SEWForCTC
[ -0.0003008818021044135, 0.042868491262197495, 0.025789868086576462, -0.011347541585564613, -0.0011820356594398618, 0.0302601121366024, 0.02235121838748455, -0.04768260195851326, -0.0030231457203626633, -0.024185165762901306, 0.0302601121366024, 0.0013754596002399921, -0.00102443085052073, ...
[ "Cannot", "CausalLMOutput", "Dropout", "False", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "Please", "True", "ValueError", "You", "_HIDDEN_STATES_START_POSITION", "__class__", "__init__", "_freeze_parameters", "_get_feat_extract_output_lengths", "a", "adap...
sew/modeling_sew.py:SEWForSequenceClassification
[ -0.0003144273650832474, 0.04001931846141815, 0.0055992938578128815, 0.02842281199991703, -0.0009166356758214533, 0.016144156455993652, 0.0056845624931156635, -0.0029417609330266714, -0.009891138412058353, -0.0031833548564463854, 0.027058515697717667, -0.008640534244477749, -0.002344881882891...
[ "CrossEntropyLoss", "False", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "Parameter", "Sequence", "SequenceClassifierOutput", "True", "ValueError", "_HIDDEN_STATES_START_POSITION", "__init__", "_freeze_parameters", "_get_feature_vector_attention_mask", "adapters"...
fsmt/modeling_fsmt.py:invert_mask
[ 0.0002009935851674527, 0.006602550391107798, 0.03506181761622429, -0.021059859544038773, 0.0009533854899927974, 0.04007064923644066, 0.0024474970996379852, -0.043941110372543335, 0.025954853743314743, -0.0030166825745254755, 0.025271831080317497, 0.021059859544038773, 0.0042404308915138245, ...
[ "Model_mask", "assert", "attention_mask", "def", "dim", "eq", "return" ]
fsmt/modeling_fsmt.py:triu_onnx
[ 0.00006751874025212601, -0.0011509077157825232, 0.032762035727500916, 0.000836702820379287, 0.0004801332834176719, 0.04993386194109917, 0.01807560585439205, -0.030954474583268166, 0.026096655055880547, 0.02022208459675312, 0.008190508931875229, 0.007343214936554432, 0.0043211993761360645, ...
[ "Model_onnx", "arange", "def", "device", "diagonal", "expand", "if", "l", "mask", "masked_fill", "return", "shape", "torch", "unsqueeze", "x" ]
fsmt/modeling_fsmt.py:_prepare_fsmt_decoder_inputs
[ -0.0002618278085719794, 0.04384360462427139, 0.0025537179317325354, -0.02525024302303791, -0.0014633664395660162, 0.03328441083431244, 0.02892300672829151, -0.05463234707713127, 0.013600699603557587, 0.02065929025411606, 0.005394370295107365, 0.004763114266097546, -0.001599660376086831, 0....
[ "None", "_prepare_Model_decoder_inputs", "bsz", "causal_mask", "causal_mask_dtype", "config", "decoder_input_ids", "decoder_padding_mask", "def", "device", "dtype", "else", "fill_with_neg_inf", "float32", "if", "input_ids", "invert_mask", "is", "make_padding_mask", "pad_token_i...
fsmt/modeling_fsmt.py:PretrainedFSMTModel
[ -0.00012410400086082518, 0.05137377604842186, -0.013294090516865253, 0.014533370733261108, -0.0007710290956310928, 0.032221272587776184, 0.04348744824528694, -0.007379346992820501, 0.006309059914201498, -0.0011618246790021658, 0.0002050796611001715, -0.007604670710861683, -0.0007956738700158...
[ "Embedding", "False", "Linear", "ModelConfig", "ModelModel", "ModelModelModel", "None", "SinusoidalPositionalEmbedding", "_init_weights", "_is_hf_initialized", "and", "attention_mask", "base_model_prefix", "bias", "class", "config", "copy_", "def", "device", "dummy_inputs", "...
fsmt/modeling_fsmt.py:_make_linear_from_emb
[ -0.00016620589303784072, -0.0034231343306601048, 0.038022249937057495, 0.04096445068717003, -0.0007567672873847187, 0.041869740933179855, 0.015163635835051537, -0.00944898184388876, 0.005799524951726198, 0.023198099806904793, -0.004243555013090372, -0.000661287282127887, 0.000211293663596734...
[ "Linear", "_make_linear_from_emb", "data", "def", "emb", "emb_size", "lin_layer", "nn", "return", "shape", "vocab_size", "weight" ]
fsmt/modeling_fsmt.py:_check_shapes
[ 0.00008882009569788352, -0.021595334634184837, 0.017024893313646317, 0.015539499931037426, 0.0008533870568498969, 0.0024280468933284283, 0.010054970160126686, -0.02330924943089485, 0.030621955171227455, 0.04684701934456825, -0.011711754836142063, -0.012568713165819645, 0.001285436563193798, ...
[ "_check_shapes", "def", "if", "shape2", "shape_1" ]
fsmt/modeling_fsmt.py:shift_tokens_right
[ -0.0000870274961926043, 0.02329172194004059, 0.017553182318806648, -0.031955793499946594, -0.0003727238217834383, 0.04973401501774788, 0.04433303698897362, -0.04208263009786606, 0.01378374919295311, -0.00015471555525436997, 0.003769433358684182, 0.0015119928866624832, -0.00021449201449286193...
[ "Model_tokens_right", "clone", "def", "dim", "gather", "index_of_eos", "input_ids", "masked_fill_", "ne", "pad_token_id", "prev_output_tokens", "return", "squeeze", "sum", "unsqueeze" ]
fsmt/modeling_fsmt.py:make_padding_mask
[ 0.0001469629496568814, -0.006617732346057892, 0.02658357284963131, 0.0025626113638281822, 0.0007180944085121155, 0.004139603115618229, 0.012052721343934536, -0.04460633173584938, 0.02095145918428898, -0.024668654426932335, 0.0351443849503994, 0.015995200723409653, 0.003224384505301714, -0....
[ "Model_padding_mask", "None", "any", "def", "eq", "if", "input_ids", "not", "padding_idx", "padding_mask", "return" ]
fsmt/modeling_fsmt.py:EncoderLayer
[ -0.00004370186070445925, 0.027097787708044052, 0.012874260544776917, 0.00036191288381814957, -0.00009355272050015628, 0.04812387377023697, 0.0268729105591774, 0.002150394953787327, 0.009894628077745438, -0.005453289486467838, 0.014167307876050472, -0.010007066652178764, 0.0022347241174429655...
[ "ACT2FN", "Attention", "False", "LayerNorm", "Linear", "ModelLayer", "Model_attention_heads", "Model_ffn_dim", "Model_padding_mask", "Module", "__init__", "activation_dropout", "activation_fn", "activation_function", "attention_dropout", "attn_weights", "class", "config", "d_mode...
fsmt/modeling_fsmt.py:FSMTEncoder
[ -0.00014173585805110633, 0.051478464156389236, 0.014853918924927711, 0.007313570473343134, -0.0008078943938016891, 0.037418268620967865, 0.0342433862388134, -0.011565646156668663, 0.0047339778393507, -0.014400363899767399, 0.023358071222901344, -0.0049040610902011395, -0.001204754807986319, ...
[ "BaseModelOutput", "Embedding", "EncoderLayer", "False", "Model", "Module", "ModuleList", "None", "SinusoidalPositionalEmbedding", "True", "_", "__init__", "all_attentions", "and", "attention_mask", "attentions", "attn", "class", "config", "d_model", "def", "dropout", "dr...
fsmt/modeling_fsmt.py:DecoderLayer
[ -0.000044314296246739104, 0.026957131922245026, 0.021228741854429245, 0.003847007406875491, 0, 0.05301569029688835, 0.02179034799337387, -0.002906315727159381, 0.0073570506647229195, 0.006289997138082981, 0.001614619861356914, -0.00043875540723092854, 0.001031952677294612, -0.0054195066913...
[ "ACT2FN", "Attention", "False", "LayerNorm", "Linear", "ModelLayer", "Model_attention_heads", "Model_ffn_dim", "Model_padding_mask", "Module", "None", "True", "__init__", "activation_dropout", "activation_fn", "activation_function", "assert", "attention_dropout", "attn_mask", "...
fsmt/modeling_fsmt.py:FSMTDecoder
[ -0.00020885166304651648, 0.0618840828537941, 0.01842871494591236, -0.00472093652933836, -0.0010380372405052185, 0.027643073350191116, 0.04777815192937851, -0.021272653713822365, 0.006768571678549051, -0.015471020713448524, 0.009612509049475193, -0.005062208976596594, -0.0013224310241639614, ...
[ "BaseModelOutputWithPastAndCrossAttentions", "DecoderLayer", "Embedding", "False", "Linear", "Model", "Module", "ModuleList", "None", "SinusoidalPositionalEmbedding", "True", "__init__", "all_cross_attns", "all_hidden_states", "all_self_attns", "and", "attentions", "cache_position"...
fsmt/modeling_fsmt.py:_reorder_buffer
[ -0.0002913032367359847, -0.0023016552440822124, 0.02186572551727295, -0.0184132419526577, -0.0014385345857590437, 0.020369648933410645, 0.01956406980752945, -0.0828595906496048, 0.0103574488312006, 0.01565125584602356, -0.02992151863873005, 0.02232605591416359, -0.002402352634817362, -0.02...
[ "None", "_reorder_buffer", "attn_cache", "def", "for", "if", "in", "index_select", "input_buffer_k", "is", "items", "k", "new_order", "not", "return" ]
fsmt/modeling_fsmt.py:Attention
[ -0.00013577946810983121, 0.03969315439462662, 0.04193570837378502, -0.011773393489420414, -0.0004835501022171229, 0.024331679567694664, 0.020631471648812294, -0.019285939633846283, -0.00022600710508413613, 0.02859252691268921, -0.005466218572109938, -0.004933612421154976, -0.0017099452670663...
[ "EncoderDecoderCache", "False", "Linear", "Model", "Module", "None", "True", "__init__", "and", "assert", "attn_mask", "attn_output", "attn_probs", "attn_weights", "attn_weights_reshaped", "be", "bmm", "bsz", "by", "cache_key", "cache_position", "class", "contiguous", "...
fsmt/modeling_fsmt.py:fill_with_neg_inf
[ 0.00008999965211842209, 0.0534975528717041, 0.013880142010748386, -0.007642507087439299, -0.00009614597365725785, 0.03101958893239498, 0.027760284021496773, -0.02214079350233078, 0.013599167577922344, 0.01483545545488596, -0.008541625924408436, -0.043382469564676285, 0.0038774486165493727, ...
[ "Model_", "Model_with_neg_inf", "def", "dtype", "finfo", "float", "min", "return", "t", "torch", "type_as" ]
fsmt/modeling_fsmt.py:_get_shape
[ -0.00007011320849414915, -0.012135317549109459, -0.009426037780940533, -0.004346136935055256, -0.00039863106212578714, 0.00677320035174489, 0.06276499480009079, -0.04041342809796333, 0.012191761285066605, 0.05666911229491234, -0.021561354398727417, 0.0340917743742466, -0.004007476847618818, ...
[ "None", "_get_shape", "def", "getattr", "return", "shape", "t" ]
fsmt/modeling_fsmt.py:FSMTModel
[ 0.00005268307722872123, 0.055962108075618744, 0.020482130348682404, -0.01835557073354721, 0.00018012803047895432, 0.027645280584692955, 0.0282049011439085, -0.036263443529605865, 0.00800258107483387, -0.012479550205171108, 0.020482130348682404, 0.009177785366773605, -0.0018327590078115463, ...
[ "BaseModelOutput", "DynamicCache", "EncoderDecoderCache", "False", "Model", "ModelDecoder", "ModelEncoder", "None", "PretrainedModel", "Seq2SeqModelOutput", "__init__", "_prepare_Model_decoder_inputs", "_tied_weights_keys", "and", "attention_mask", "attentions", "auto_docstring", "...
fsmt/modeling_fsmt.py:FSMTForConditionalGeneration
[ -0.00007681942224735394, 0.04849400743842125, 0, -0.017989711835980415, -0.000328228430589661, 0.042683664709329605, 0.011788289994001389, -0.016537126153707504, -0.004860574379563332, 0.004832639824599028, 0.02324136719107628, 0.009777017869055271, -0.0022068126127123833, 0.00960941147059...
[ "CrossEntropyLoss", "False", "GenerationMixin", "Model", "ModelModel", "None", "PretrainedModelModel", "Seq2SeqLMOutput", "__init__", "attention_mask", "auto_docstring", "base_model", "base_model_prefix", "cache_position", "class", "config", "cross_attentions", "decoder", "decode...
fsmt/modeling_fsmt.py:SinusoidalPositionalEmbedding
[ -0.00028367459890432656, 0.01712101884186268, 0.022406702861189842, 0.00965211819857359, -0.0015727782156318426, 0.0101691959425807, 0.036080535501241684, 0.0023268498480319977, 0.0016589578008279204, -0.016891207545995712, 0.014018552377820015, 0.010341554880142212, 0.00037524045910686255, ...
[ "Embedding", "False", "ModelPositionalEmbedding", "None", "Parameter", "__init__", "arange", "bsz", "cat", "class", "cos", "cumsum", "def", "detach_", "device", "dim", "dtype", "emb", "embedding_dim", "exp", "float", "forward", "get_embedding", "half_dim", "if", "in...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRKeypointMatchingOutput
[ -0.00005685357245965861, 0.005556665360927582, 0.04197115823626518, -0.023693395778536797, -0.00016130547737702727, 0.04783809185028076, 0.05551024153828621, -0.02030862495303154, 0.017487982288002968, -0.0018757270881906152, 0.01997014693915844, 0.030688587576150894, -0.0022847203072160482,...
[ "ModelKeypointMatchingOutput", "ModelOutput", "None", "attentions", "class", "hidden_states", "keypoints", "loss", "matches", "matching_scores", "r" ]
efficientloftr/modeling_efficientloftr.py:compute_embeddings
[ -0.00018259562784805894, -0.00043964770156890154, 0.027116335928440094, -0.0001888003316707909, -0.0009714796324260533, 0.02575484663248062, 0.047425225377082825, -0.027570165693759918, 0.015203301794826984, -0.002184056444093585, 0.017926281318068504, 0.009927528910338879, -0.00104239059146...
[ "Model_embeddings", "cumsum", "def", "device", "dtype", "emb", "embed_height", "embed_width", "hidden_size", "i_indices", "inv_freq", "j_indices", "ones", "return", "torch", "unsqueeze", "zeros" ]
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRRotaryEmbedding
[ -0.00029273677500896156, 0.04556574299931526, 0.0003903156903106719, -0.003932069055736065, -0.0015684908721596003, 0.036082517355680466, 0.04417795315384865, 0.0064185247756540775, -0.0017202802700921893, 0.020469889044761658, 0.004568139091134071, -0.0009757892694324255, -0.001170947100035...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "class", "clone", "compute_default_rope_parameters", "compute_embeddings", "config", "cos", "cpu", "def", "default...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRConvNormLayer
[ -0.000021413387003121898, 0.011415763758122921, 0.012828606180846691, 0.012376496568322182, 0.0001827864471124485, 0.028143813833594322, 0.007148980628699064, -0.03029133379459381, 0.017519241198897362, 0.010342003777623177, 0.04701938480138779, -0.017632268369197845, 0.0030517387203872204, ...
[ "ACT2CLS", "BatchNorm2d", "Conv2d", "Identity", "ModelConvNormLayer", "Module", "None", "__init__", "activation", "batch_norm_eps", "class", "config", "conv", "def", "else", "forward", "hidden_state", "if", "in_channels", "is", "kernel_size", "nn", "norm", "out_channels...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRRepVGGBlock
[ -0.00006978242890909314, -0.019457297399640083, 0.008363224565982819, -0.006144409533590078, 0.0002969086926896125, 0.04778985306620598, 0.009614863432943821, -0.01740916073322296, 0.009102828800678253, -0.0013938706833869219, 0.038459453731775284, -0.019571082666516304, 0.002531724283471703...
[ "ACT2FN", "BatchNorm2d", "GradientCheckpointingLayer", "Identity", "ModelConvNormLayer", "ModelRepVGGBlock", "None", "__init__", "activation", "activation_function", "and", "block_idx", "class", "config", "conv1", "conv2", "def", "else", "forward", "hidden_states", "identity"...
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRRepVGGStage
[ -0.00006419470446417108, -0.03903038054704666, -0.009928780607879162, 0.019857561215758324, 0.00018366817675996572, 0.05044277012348175, 0.010727647691965103, -0.0435953363776207, 0.008844603784382343, 0.0025963191874325275, 0.034237176179885864, -0.04907328262925148, -0.00018099340377375484...
[ "ModelRepVGGBlock", "ModelRepVGGStage", "Module", "ModuleList", "__init__", "append", "block", "block_idx", "blocks", "class", "config", "def", "for", "forward", "hidden_states", "in", "nn", "range", "return", "self", "stage_idx", "stage_num_blocks", "super" ]
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRepVGG
[ -0.00017732522974256426, -0.0007698944536969066, -0.005246688146144152, 0.0360424667596817, -0.00035821477649733424, 0.04881701245903969, 0.023267921060323715, -0.03284883126616478, 0.0050470856949687, 0.018477465957403183, 0.05201064795255661, -0.042429737746715546, 0.0007520728395320475, ...
[ "ModelRepVGGStage", "ModelepVGG", "Module", "ModuleList", "__init__", "append", "class", "config", "def", "for", "forward", "hidden_states", "in", "len", "nn", "outputs", "range", "return", "self", "stage", "stage_idx", "stage_stride", "stages", "super" ]
efficientloftr/modeling_efficientloftr.py:EfficientLoFTRAggregationLayer
[ -0.00021891610231250525, -0.0037304728757590055, 0.006378823891282082, 0.008884790353477001, -0.0006905646296218038, 0.04738554731011391, -0.004243056755512953, -0.029388152062892914, 0.012472879141569138, 0.011846386827528477, 0.03963983431458473, 0.0018367595039308071, 0.002249674405902624...
[ "Conv2d", "LayerNorm", "MaxPool2d", "ModelAggregationLayer", "Module", "None", "__init__", "class", "config", "def", "else", "encoder_hidden_states", "forward", "groups", "hidden_size", "hidden_states", "if", "is", "is_cross_attention", "kernel_size", "kv_aggregation", "kv_...
efficientloftr/modeling_efficientloftr.py:rotate_half
[ 0.00004134093614993617, 0.011483984999358654, 0.030173607170581818, -0.007937460206449032, 0.0003535969590302557, 0.03129948675632477, 0.01598750799894333, -0.02792184427380562, 0.018577033653855324, 0.03422677889466286, -0.00529163982719183, 0.0008197819697670639, -0.0004046134417876601, ...
[ "Model_half", "def", "dim", "flatten", "return", "rot_x", "stack", "torch", "x", "x1", "x2" ]
efficientloftr/modeling_efficientloftr.py:apply_rotary_pos_emb
[ -0.00014119588013272732, 0.029553454369306564, 0.02216508984565735, -0.000987483188509941, -0.0007423884235322475, 0.02387009747326374, 0.04978620260953903, -0.006393775809556246, 0.012673884630203247, 0.03773748502135277, 0.006308525800704956, 0.002358592813834548, -0.0006713464972563088, ...
[ "Model_rotary_pos_emb", "cos", "def", "float", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]