Columns (per record, one field per line below):
  identifier — string, length 24–102
  embedding  — list of floats, fixed length 2.56k
  tokens     — list of strings, length 4–448
(A short loading sketch follows the last record.)
florence2/modeling_florence2.py:Florence2Model
[ -0.00017649360233917832, 0.03914313763380051, -0.001873278641141951, 0.013532341457903385, -0.00040366360917687416, 0.03534065932035446, 0.016663791611790657, -0.03534065932035446, -0.0030755321495234966, 0.03355126082897186, 0.02203199453651905, 0.004669216927140951, -0.0016565934056416154,...
[ "AutoModel", "Image", "ModelModel", "ModelMultiModalProjector", "ModelPreTrainedModel", "ModelSeq2SeqModelOutput", "ModelVisionBackbone", "None", "Obtains", "True", "ValueError", "You", "__init__", "_checkpoint_conversion_mapping", "all", "and", "apply", "attention_mask", "attent...
florence2/modeling_florence2.py:shift_tokens_right
[ -0.0001357423752779141, 0.026739485561847687, 0.00459761219099164, -0.04174518957734108, -0.0005782272783108056, 0.05189942196011543, 0.04039129242300987, -0.057089366018772125, 0.006572046782821417, 0.007784913759678602, 0.020759768784046173, -0.009872172959148884, -0.0006698974757455289, ...
[ "Model_tokens_right", "Modeled_input_ids", "None", "clone", "decoder_start_token_id", "def", "if", "input_ids", "is", "masked_fill_", "new_zeros", "pad_token_id", "return", "shape" ]
florence2/modeling_florence2.py:Florence2ForConditionalGeneration
[ -0.0002030340110650286, 0.037576235830783844, 0.005962695926427841, 0.015975525602698326, -0.0007136953645385802, 0.022163229063153267, 0.01946314051747322, -0.037576235830783844, -0.003107914701104164, 0.012262903153896332, 0.031051021069288254, 0.0021375701762735844, 0.0014133277582004666,...
[ "False", "GenerationMixin", "Linear", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "ModelSeq2SeqLMOutput", "None", "True", "__init__", "_checkpoint_conversion_mapping", "_prepare_encoder_decoder_kwargs_for_generation", "_tied_weights_keys", "and", "attention_mas...
vit/modeling_vit.py:ViTEmbeddings
[ -0.00027371433679945767, 0.02255549281835556, 0.014540850184857845, 0.006526208482682705, -0.0013739386340603232, 0.020952563732862473, 0.021410543471574783, -0.034348465502262115, 0.0021467790938913822, 0.0024902636650949717, 0.02163953334093094, 0.027478773146867752, -0.0020465960260480642...
[ "Dropout", "False", "ModelEmbeddings", "ModelPatchEmbeddings", "Module", "None", "Parameter", "__init__", "align_corners", "and", "batch_size", "bicubic", "bool_masked_pos", "cat", "class", "class_pos_embed", "cls_token", "cls_tokens", "config", "def", "dim", "dropout", "...
vit/modeling_vit.py:ViTPatchEmbeddings
[ -0.00009065198537427932, 0.013236950151622295, 0.017912128940224648, 0.01453248132020235, -0.00019362560124136508, 0.0013307359768077731, 0.02039053663611412, -0.017123544588685036, 0.007547878194600344, 0.007773187942802906, 0.011378144845366478, 0.010364250279963017, -0.0029290271922945976...
[ "Conv2d", "Expected", "False", "Input", "Iterable", "Make", "ModelPatchEmbeddings", "Module", "ValueError", "__init__", "abc", "batch_size", "but", "channel", "class", "collections", "config", "configuration", "def", "dimension", "doesn", "else", "embeddings", "f", "f...
vit/modeling_vit.py:eager_attention_forward
[ 0.00002095530362566933, 0.025862814858555794, 0.0269921962171793, -0.01146321278065443, 0.00008955634984886274, 0.03704368323087692, 0.060083046555519104, -0.023942869156599045, 0.021458230912685394, 0.014569009654223919, 0.023152301087975502, 0.026879258453845978, 0.003063444746658206, -0...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "shape", "size", "softmax", "tor...
vit/modeling_vit.py:ViTSelfAttention
[ -0.00010323112655896693, 0.03958452492952347, 0.044982410967350006, 0.00047090963926166296, -0.0005376804037950933, 0.012370163574814796, 0.028788743540644646, -0.020242085680365562, 0.0002301834465470165, 0.021928926929831505, 0.017430685460567474, 0.007478326093405485, -0.00055525166681036...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelSelfAttention", "Module", "None", "The", "ValueError", "__init__", "_attn_implementation", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_interface", "attention_probs", "attention_probs_dropout_prob", ...
vit/modeling_vit.py:ViTSelfOutput
[ -0.000045805918489350006, 0.04578312486410141, 0.04645640775561333, 0.005386250093579292, -0.000490934238769114, 0.04219229146838188, 0.028277814388275146, -0.018739663064479828, 0.0011501888511702418, 0.010940820910036564, 0.010323646478354931, 0.0002261804329464212, 0.003506673267111182, ...
[ "Dropout", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "nn", "return", "self", "super" ]
vit/modeling_vit.py:ViTAttention
[ 0.000160034658620134, 0.038267627358436584, 0.04974791780114174, -0.0009707597200758755, 0.0004537246422842145, 0.028475617989897728, 0.035791490226984024, -0.014631740748882294, 0.01390015333890915, -0.004361384082585573, 0.0227354746311903, 0.02566182240843773, 0.004699039738625288, -0.0...
[ "ModelAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "_", "__init__", "attention", "class", "config", "def", "forward", "hidden_states", "nn", "output", "return", "self", "self_attn_output", "super" ]
vit/modeling_vit.py:ViTIntermediate
[ -0.00025367451598867774, 0.02240910567343235, 0.04047359153628349, 0.012862369418144226, -0.0009432404185645282, 0.03635763004422188, 0.03452831506729126, -0.018864808604121208, -0.001329111517407, -0.003772961674258113, 0.023209432139992714, -0.02103712037205696, -0.0013719861162826419, 0...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
vit/modeling_vit.py:ViTOutput
[ -0.0002648788213264197, 0.03665350377559662, 0.05406391620635986, 0.016150448471307755, -0.0012742818798869848, 0.0490240603685379, 0.04421328753232956, -0.02405386045575142, -0.0004617482190951705, 0.00538348313421011, 0.008533393032848835, -0.002505610464140773, 0.0014532541390508413, 0....
[ "Dropout", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "nn", "return", "self", "super" ]
vit/modeling_vit.py:ViTLayer
[ 0.000011264218301221263, 0.01948556676506996, 0.031132111325860023, 0.021837273612618446, 0.0001924759562825784, 0.03695538640022278, 0.010246720165014267, -0.0007384077762253582, 0.005207349546253681, 0.018253719434142113, 0.006355206482112408, 0.015790028497576714, 0.0030236223246902227, ...
[ "GradientCheckpointingLayer", "LayerNorm", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "__init__", "attention", "attention_output", "chunk_size_feed_forward", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "hidden_states_norm", ...
vit/modeling_vit.py:ViTEncoder
[ -0.000046535220462828875, 0.013374047353863716, 0.02393842115998268, 0.035739049315452576, -0.000099216602393426, 0.0343904085457325, 0.007979474030435085, -0.024949902668595314, 0.003961639944463968, 0.011351082473993301, 0.018656233325600624, -0.0229269377887249, -0.000670809589792043, 0...
[ "BaseModelOutput", "False", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "_", "__init__", "class", "config", "def", "enumerate", "for", "forward", "gradient_checkpointing", "hidden_states", "i", "in", "last_hidden_state", "layer", "layer_module", "nn", "num_hidde...
vit/modeling_vit.py:ViTPreTrainedModel
[ -0.00022147028357721865, 0.04240890219807625, 0.0029765604995191097, 0.022111592814326286, -0.0010984926484525204, 0.027327660471200943, 0.018709808588027954, -0.011452671140432358, 0.001410322729498148, 0.017235703766345978, 0.006690174341201782, 0.00046065819333307445, -0.00205524428747594...
[ "Conv2d", "LayerNorm", "Linear", "Model", "ModelConfig", "ModelEmbeddings", "ModelLayer", "ModelPreTrainedModel", "ModelSelfAttention", "None", "PreTrainedModel", "True", "_can_record_outputs", "_init_weights", "_no_split_modules", "_supports_attention_backend", "_supports_flash_attn...
vit/modeling_vit.py:ViTModel
[ -0.000045470504119293764, 0.034921348094940186, 0.003973422572016716, 0.02462402731180191, -0.00019412407709751278, 0.031115815043449402, 0.011808340437710285, -0.022945115342736244, 0.006631697993725538, 0.013990924693644047, 0.027422212064266205, 0.014550561085343361, -0.000930396490730345...
[ "BaseModelOutputWithPooling", "False", "LayerNorm", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelPooler", "ModelPreTrainedModel", "None", "True", "__init__", "add_pooling_layer", "auto_docstring", "bool_masked_pos", "check_model_inputs", "class", "config", "def", "dtype...
vit/modeling_vit.py:ViTPooler
[ -0.0002990247157867998, 0.01198981050401926, 0.029974525794386864, 0.03251083195209503, -0.0013257963582873344, 0.03758344426751137, 0.03896688297390938, -0.010836943984031677, 0.0004449343541637063, -0.001873407862149179, 0.0103757968172431, -0.009626434184610844, -0.0008106091408990324, ...
[ "ACT2FN", "Linear", "ModelPooler", "Module", "__init__", "activation", "class", "config", "def", "dense", "first_token_tensor", "forward", "hidden_size", "hidden_states", "nn", "pooled_output", "pooler_act", "pooler_output_size", "return", "self", "super" ]
vit/modeling_vit.py:ViTForMaskedImageModeling
[ -0.00015785617870278656, 0.055453117936849594, 0.010383428074419498, -0.0016837992006912827, -0.0006735196802765131, 0.04041118174791336, 0.0007787570939399302, -0.059269730001688004, 0.009317021816968918, 0.01251624058932066, 0.015827711671590805, 0.022450655698776245, -0.000799804634880274...
[ "Conv2d", "False", "Got", "MaskedImageModelingOutput", "Model", "ModelForMaskedImageModeling", "ModelModel", "ModelPreTrainedModel", "None", "PixelShuffle", "Sequential", "True", "ValueError", "When", "__init__", "add_pooling_layer", "and", "as", "attentions", "auto_docstring",...
vit/modeling_vit.py:ViTForImageClassification
[ -0.00012937716383021325, 0.039386603981256485, 0.014210507273674011, 0.02629503421485424, -0.0004615617508534342, 0.012196419760584831, 0.02763775922358036, -0.006377944257110357, 0.003916281741112471, 0.012755888514220715, 0.0402817539870739, 0.007273094262927771, 0.000304211163893342, 0....
[ "False", "Identity", "ImageClassifierOutput", "Linear", "Model", "ModelForImageClassification", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "add_pooling_layer", "attentions", "auto_docstring", "can_return_tuple", "class", "classifier", "config", "def", "else", "fo...
longformer/modeling_longformer.py:LongformerBaseModelOutput
[ -0.00009416570537723601, 0.02614358253777027, 0.02184290625154972, 0.013411318883299828, -0.0008063767454586923, 0.03734797611832619, 0.05251351743936539, -0.010242399759590626, 0.01844763569533825, -0.011091217398643494, 0.016750000417232513, 0.029199326410889626, -0.001980574568733573, 0...
[ "FloatTensor", "ModelBaseModelOutput", "ModelOutput", "None", "attentions", "class", "global_attentions", "hidden_states", "last_hidden_state", "r", "torch" ]
longformer/modeling_longformer.py:LongformerBaseModelOutputWithPooling
[ -0.0001724688772810623, 0.013370783068239689, 0.020938077941536903, 0.028334680944681168, -0.0009530239040032029, 0.028334680944681168, 0.04802102595567703, -0.005291416309773922, 0.01467941328883171, -0.007453500293195248, 0.011038008145987988, 0.021393252536654472, -0.0029586413875222206, ...
[ "FloatTensor", "ModelBaseModelOutputWithPooling", "ModelOutput", "None", "attentions", "class", "global_attentions", "hidden_states", "last_hidden_state", "pooler_output", "r", "torch" ]
longformer/modeling_longformer.py:LongformerMaskedLMOutput
[ -0.00009794468496693298, 0.03328187018632889, 0.027997247874736786, -0.019227026030421257, -0.0005867896834388375, 0.04947304725646973, 0.04272672161459923, -0.014054843224585056, 0.016865812242031097, -0.02215043269097805, 0.022262871265411377, 0.03260723501443863, -0.0033309978898614645, ...
[ "ModelMaskedLMOutput", "ModelOutput", "None", "attentions", "class", "global_attentions", "hidden_states", "logits", "loss", "r" ]
longformer/modeling_longformer.py:LongformerQuestionAnsweringModelOutput
[ -0.000059797632275149226, 0.040521688759326935, 0.029941026121377945, 0.004811950959265232, -0.0003411982615943998, 0.052678197622299194, 0.06393422186374664, 0.013056988827884197, 0.01823476143181324, -0.00021192984422668815, 0.024763254448771477, 0.0339931957423687, -0.0015758435474708676,...
[ "ModelOutput", "ModelQuestionAnsweringModelOutput", "None", "attentions", "class", "end_logits", "global_attentions", "hidden_states", "loss", "r", "start_logits" ]
longformer/modeling_longformer.py:LongformerSequenceClassifierOutput
[ -0.00019721692660823464, 0.03260181471705437, 0.00594303896650672, 0.015055699273943901, -0.0007959427312016487, 0.04143147170543671, 0.051845941692590714, -0.010131466202437878, 0.012904884293675423, -0.014716096222400665, 0.04007306322455406, 0.03441302478313446, -0.003056420013308525, -...
[ "ModelOutput", "ModelSequenceClassifierOutput", "None", "attentions", "class", "global_attentions", "hidden_states", "logits", "loss", "r" ]
longformer/modeling_longformer.py:LongformerMultipleChoiceModelOutput
[ -0.00021064608881715685, 0.03655959293246269, 0.01565207540988922, 0.015309329144656658, -0.001042519579641521, 0.053925398737192154, 0.050269439816474915, -0.014566712081432343, 0.01245311088860035, -0.003598834853619337, 0.03221813961863518, 0.016223318874835968, -0.005483938846737146, -...
[ "ModelMultipleChoiceModelOutput", "ModelOutput", "None", "attentions", "class", "global_attentions", "hidden_states", "logits", "loss", "r" ]
longformer/modeling_longformer.py:LongformerTokenClassifierOutput
[ -0.000150643041706644, 0.039698872715234756, 0.018715182319283485, -0.029944293200969696, -0.0007762547465972602, 0.04423588886857033, 0.06442560255527496, -0.00725922267884016, 0.013327479362487793, -0.018261481076478958, 0.04242108017206192, 0.044689588248729706, -0.002169259823858738, -...
[ "ModelOutput", "ModelTokenClassifierOutput", "None", "attentions", "class", "global_attentions", "hidden_states", "logits", "loss", "r" ]
longformer/modeling_longformer.py:_get_question_end_index
[ -0.00004547962453216314, 0.027427712455391884, 0.004338056780397892, -0.015561029314994812, -0.00009708150901133195, 0.017800025641918182, 0.05373592674732208, -0.031122058629989624, 0.009571712464094162, 0.02250191941857338, 0.04366043955087662, 0.04366043955087662, -0.0011544828303158283, ...
[ "There", "You", "_get_question_end_index", "also", "answering", "assert", "avoid", "batch_size", "be", "consider", "def", "dimensions", "error", "every", "exactly", "f", "for", "forward", "function", "global_attention_mask", "have", "in", "input_ids", "manually", "mig...
longformer/modeling_longformer.py:_compute_global_attention_mask
[ 0, 0.020446401089429855, 0.02971990592777729, -0.0294964462518692, 0.00009427131590200588, 0.019664298743009567, 0.028155699372291565, -0.03977551311254501, 0.010893573984503746, 0.009720420464873314, 0.04781999811530113, 0.023686541244387627, 0.002709427382797003, -0.035306353121995926, ...
[ "True", "_compute_global_attention_mask", "_get_question_end_index", "arange", "attention_mask", "before_sep_token", "bool", "def", "device", "dim", "else", "expand_as", "if", "input_ids", "is", "question_end_index", "return", "sep_token_id", "shape", "to", "torch", "unsque...
longformer/modeling_longformer.py:create_position_ids_from_input_ids
[ 0.000022455322323367, -0.004883487243205309, 0.011218068189918995, -0.022659381851553917, -0.00028777692932635546, 0.023440739139914513, 0.021431533619761467, -0.019199082627892494, 0.01562715880572796, -0.013673764653503895, 0.020092062652111053, 0.013617953285574913, 0.0016045743832364678,...
[ "Model_position_ids_from_input_ids", "cumsum", "def", "dim", "incremental_indices", "input_ids", "int", "long", "mask", "ne", "padding_idx", "return", "torch", "type_as" ]
longformer/modeling_longformer.py:LongformerEmbeddings
[ -0.0002165139449061826, 0.010859278962016106, 0.0058255502954125404, -0.006758769974112511, -0.001018057344481349, 0.03574512526392937, 0.026921961456537247, -0.005966947413980961, 0.0043550231494009495, -0.013178187422454357, 0.019682442769408226, 0.018551267683506012, -0.000989777967333793...
[ "Dropout", "Embedding", "LayerNorm", "ModelEmbeddings", "Module", "None", "__init__", "arange", "class", "config", "create_position_ids_from_input_ids", "create_position_ids_from_inputs_embeds", "def", "device", "dropout", "dtype", "else", "embeddings", "eps", "expand", "forw...
longformer/modeling_longformer.py:LongformerSelfAttention
[ -0.0000374915252905339, 0.029132239520549774, 0.021115226671099663, -0.02156689018011093, -0.00032110390020534396, 0.024276865646243095, 0.046295419335365295, -0.03206804394721985, 0.01535652857273817, 0.013154673390090466, 0.025067275390028954, 0.0440371036529541, 0.00010585842392174527, ...
[ "False", "Given", "Linear", "ModelSelfAttention", "Module", "None", "Sequence", "Size", "The", "True", "Unexpected", "ValueError", "__init__", "_chunk", "_compute_attn_output_with_global_indices", "_compute_global_attn_output_from_hidden", "_concat_with_global_key_attn_probs", "_ge...
longformer/modeling_longformer.py:LongformerSelfOutput
[ -0.00012168083776487038, 0.04875698313117027, 0.03859927877783775, 0.020879726856946945, -0.0006630723946727812, 0.05620596557855606, 0.02370131015777588, -0.01952536590397358, 0.0035551965702325106, 0.017155233770608902, 0.01365646906197071, 0.003512872848659754, 0.0036962758749723434, -0...
[ "Dropout", "LayerNorm", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
longformer/modeling_longformer.py:LongformerAttention
[ 0.0002205297350883484, 0.03687826544046402, 0.020943211391568184, -0.015252121724188328, 0.0008927897433750331, 0.04780515655875206, 0.0446181446313858, -0.034146539866924286, 0.01348788384348154, -0.00036992086097598076, 0.028455451130867004, 0.04552872106432915, 0.0015792774502187967, -0...
[ "False", "ModelAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "__init__", "attention_mask", "attn_output", "class", "config", "def", "forward", "hidden_states", "is_global_attn", "is_index_global_attn", "is_index_masked", "layer_id", "nn", "output", "o...
longformer/modeling_longformer.py:LongformerIntermediate
[ -0.00025367451598867774, 0.02240910567343235, 0.04047359153628349, 0.012862369418144226, -0.0009432404185645282, 0.03635763004422188, 0.03452831506729126, -0.018864808604121208, -0.001329111517407, -0.003772961674258113, 0.023209432139992714, -0.02103712037205696, -0.0013719861162826419, 0...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
longformer/modeling_longformer.py:LongformerOutput
[ -0.0002591986849438399, 0.03935529664158821, 0.0507957898080349, 0.030660521239042282, -0.0012513039400801063, 0.04988054931163788, 0.03798243775963783, -0.023681821301579475, 0.002259497530758381, 0.014758236706256866, 0.011040075682103634, 0.00267421524040401, 0.0013013561256229877, 0.00...
[ "Dropout", "LayerNorm", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "layer_norm_eps", "nn", "return", "self", "su...
longformer/modeling_longformer.py:LongformerLayer
[ 0, 0.007582557387650013, 0.020557155832648277, 0.0010461120400577784, 0.00029312202241271734, 0.04246232286095619, 0.028757551684975624, -0.014322608709335327, 0.003931696526706219, 0, 0.009379904717206955, 0.00932373758405447, 0.0019658482633531094, 0.006880468688905239, -0.011401919648...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "__init__", "apply_chunking_to_forward", "attention", "attention_mask", "attn_output", "chunk_size_feed_forward", "class", "config", "def", "ff_chunk", "forward", "h...
longformer/modeling_longformer.py:LongformerEncoder
[ -0.00010677480895537883, 0.028576821088790894, 0.015474405139684677, 0.011069281958043575, -0.00030532298842445016, 0.04337351396679878, 0.01886296086013317, -0.03975905478000641, 0.008358437567949295, 0.0008436092175543308, 0.026882542297244072, 0.01886296086013317, 0.0005506403394974768, ...
[ "False", "ModelBaseModelOutput", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "None", "True", "__init__", "all_attentions", "all_global_attentions", "all_hidden_states", "and", "any", "attention_mask", "attentions", "class", "config", "def", "else", "enumerate", "f...
longformer/modeling_longformer.py:LongformerPooler
[ -0.0002915378427132964, 0.011618588119745255, 0.03685896843671799, 0.022893769666552544, -0.001151842763647437, 0.03296703100204468, 0.037087906152009964, -0.0066105760633945465, -0.0011303798528388143, 0.0015954095870256424, 0.014022434130311012, -0.016025640070438385, -0.001244848710484802...
[ "Linear", "ModelPooler", "Module", "Tanh", "__init__", "activation", "class", "config", "def", "dense", "first_token_tensor", "forward", "hidden_size", "hidden_states", "nn", "pooled_output", "return", "self", "super" ]
longformer/modeling_longformer.py:LongformerLMHead
[ -0.00016041471099015325, 0.04488105699419975, 0.013015505857765675, 0.033211980015039444, -0.0006381525308825076, 0.017167003825306892, 0.05924299359321594, -0.01739140972495079, 0.002791040576994419, 0.02176731266081333, 0.014586343429982662, 0.015035153366625309, 0.001016836380586028, 0....
[ "LayerNorm", "Linear", "ModelLMHead", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "dense", "eps", "features", "forward", "gelu", "hidden_size", "kwargs", "layer_norm", "layer_norm_eps", "nn", "return", "self", "super", "torch", "v...
longformer/modeling_longformer.py:LongformerPreTrainedModel
[ -0.0002705881779547781, 0.02396945282816887, 0.015941405668854713, 0.015023915097117424, -0.0013332291273400187, 0.027639416977763176, 0.025919120758771896, -0.001361900707706809, 0.000417530070990324, 0.004931514151394367, 0.008716164156794548, -0.0045301117934286594, -0.002193376887589693,...
[ "Model", "ModelConfig", "ModelPreTrainedModel", "ModelSelfAttention", "PreTrainedModel", "True", "_no_split_modules", "base_model_prefix", "class", "config", "supports_gradient_checkpointing" ]
longformer/modeling_longformer.py:LongformerModel
[ 0.0000816382234916091, 0.016067806631326675, 0.011854221113026142, 0.010000243782997131, 0.00029846231336705387, 0.047641608864068985, 0.02078702300786972, -0.01067441701889038, 0.0067417374812066555, -0.011854221113026142, 0.036854829639196396, 0.0164048932492733, 0.0014326191740110517, 0...
[ "Expected", "Given", "Input", "ModelBaseModelOutputWithPooling", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelPooler", "ModelPreTrainedModel", "None", "True", "__init__", "_merge_to_attention_mask", "_pad_to_window_size", "a", "add_pooling_layer", "an", "and", "are", ...
longformer/modeling_longformer.py:LongformerForMaskedLM
[ 0, 0.04162666201591492, 0.011074473150074482, -0.01836470514535904, -0.00009130179387284443, 0.04029104858636856, 0.02014552429318428, -0.013244847767055035, 0.006010266952216625, -0.0008138903067447245, 0.0255992840975523, 0.03962324187159538, -0.00006043309622327797, -0.01469176355749368...
[ "CrossEntropyLoss", "False", "Model", "ModelForMaskedLM", "ModelLMHead", "ModelMaskedLMOutput", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "bias", "class", "config", "decod...
longformer/modeling_longformer.py:LongformerForSequenceClassification
[ -0.0002509402111172676, 0.03573954477906227, -0.0019368344219401479, 0.015268475748598576, -0.0006785989389754832, 0.023977162316441536, 0.01888767071068287, 0.008199736475944519, -0.0025164710823446512, 0.004043318331241608, 0.06243110075592995, 0.01583397388458252, 0.0014420227380469441, ...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "False", "MSELoss", "Model", "ModelClassificationHead", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "ModelSequenceClassifierOutput", "None", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "a...
longformer/modeling_longformer.py:LongformerClassificationHead
[ -0.00030054955277591944, 0.026104874908924103, 0.0368674099445343, 0.010361803695559502, -0.001187886344268918, 0.010361803695559502, 0.05358368903398514, 0.004379436373710632, -0.00357797066681087, 0.008529881946742535, 0.025188913568854332, 0.0037497133016586304, -0.0013453169958665967, ...
[ "Dropout", "Linear", "ModelClassificationHead", "Module", "__init__", "class", "config", "def", "dense", "dropout", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "kwargs", "nn", "num_labels", "out_proj", "output", "return", "self", "super", "tanh", "...
longformer/modeling_longformer.py:LongformerForQuestionAnswering
[ -0.00011925508442800492, 0.03456994518637657, 0.0062293242663145065, 0.011672968044877052, -0.0004173927882220596, 0.044447071850299835, 0.03973298519849777, 0.01706049218773842, 0.004349302966147661, 0.023233694955706596, 0.031876180320978165, 0.02794777974486351, 0.0011644908227026463, -...
[ "CrossEntropyLoss", "False", "It", "Linear", "Model", "ModelForQuestionAnswering", "ModelModel", "ModelPreTrainedModel", "ModelQuestionAnsweringModelOutput", "None", "Please", "__init__", "_compute_global_attention_mask", "add_pooling_layer", "and", "attention_mask", "attentions", ...
longformer/modeling_longformer.py:LongformerForTokenClassification
[ -0.00014897204528097063, 0.040844786912202835, -0.0006805113516747952, -0.029336033388972282, -0.0005465246504172683, 0.037008535116910934, 0.04310140386223793, 0.010493273846805096, 0.0017065673600882292, 0.00482352077960968, 0.0627339780330658, 0.032720960676670074, 0.00029970708419568837,...
[ "CrossEntropyLoss", "Dropout", "False", "Linear", "Model", "ModelForTokenClassification", "ModelModel", "ModelPreTrainedModel", "ModelTokenClassifierOutput", "None", "__init__", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "config", ...
longformer/modeling_longformer.py:LongformerForMultipleChoice
[ -0.00018197833560407162, 0.05540622025728226, 0.011250854469835758, 0.02465011365711689, -0.0006855106330476701, 0.038445133715867996, 0.04545571655035019, -0.009724357165396214, 0.0018233169103041291, 0.004381614271551371, 0.05043096840381622, 0.0052014002576470375, -0.0020494647324085236, ...
[ "CrossEntropyLoss", "Dropout", "False", "Linear", "Model", "ModelForMultipleChoice", "ModelModel", "ModelMultipleChoiceModelOutput", "ModelPreTrainedModel", "None", "__init__", "_compute_global_attention_mask", "and", "attention_mask", "attentions", "auto_docstring", "before_sep_toke...
olmo3/modeling_olmo3.py:Olmo3RMSNorm
[ -0.00008864036499289796, 0.041545428335666656, 0.032739605754613876, 0.0526091568171978, -0.00047274859389290214, 0.039061736315488815, 0.02190166711807251, -0.02822379767894745, 0.007563977502286434, 0.042900171130895615, 0.01941797323524952, 0.006124563980847597, 0.0027235965244472027, 0...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
olmo3/modeling_olmo3.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
olmo3/modeling_olmo3.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
olmo3/modeling_olmo3.py:apply_rotary_pos_emb
[ -0.00016319907444994897, 0.027587739750742912, 0.0264524407684803, 0.0003601023054216057, -0.0006527962977997959, 0.023273607715964317, 0.045638974756002426, -0.004995310679078102, 0.013112691231071949, 0.03632953390479088, 0.006130608730018139, 0.0023841257207095623, -0.0006918221479281783,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
olmo3/modeling_olmo3.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
olmo3/modeling_olmo3.py:Olmo3Attention
[ -0.00007041082426439971, 0.03830348700284958, 0.023996008560061455, 0.006421466823667288, -0.0004013416764792055, 0.03537439554929733, 0.033121250569820404, -0.012279647402465343, 0.0033093085512518883, 0.0182504840195179, 0.02005300112068653, 0.03267062082886696, 0.0003626157413236797, -0...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "ModelRMSNorm", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "assert", "attention_dropout", "attention_interface", "attention_mask", "attention_type", "attn_output", "attn_weig...
olmo3/modeling_olmo3.py:Olmo3MLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
olmo3/modeling_olmo3.py:Olmo3DecoderLayer
[ -0.00018600541807245463, 0.047163888812065125, 0.015418963506817818, -0.0006802483694627881, -0.0008432245813310146, 0.03900090605020523, 0.04534989222884178, -0.03310542181134224, 0.007482732180505991, -0.0071992953307926655, -0.002409212989732623, 0.02777680940926075, -0.000154118781210854...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "kwargs", "layer_idx", ...
olmo3/modeling_olmo3.py:Olmo3RotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
olmo3/modeling_olmo3.py:Olmo3PreTrainedModel
[ -0.00034975787275470793, 0.027630871161818504, 0.016904963180422783, 0.011250545270740986, -0.0018799485405907035, 0.029612833634018898, 0.03077869303524494, -0.03241089731454849, -0.00408050836995244, 0.003993069287389517, 0.006383080966770649, 0.004459412768483162, -0.004255387466400862, ...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supp...
olmo3/modeling_olmo3.py:Olmo3Model
[ -0.00010974110045935959, 0.043776560574769974, -0.0002344669192098081, -0.00552848307415843, -0.0008426706190221012, 0.038586556911468506, 0.04197134077548981, -0.01872914656996727, 0.012580120004713535, 0.003723264206200838, 0.021211324259638786, 0.00654391897842288, -0.0011776234023272991,...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "attention_type", "auto_do...
olmo3/modeling_olmo3.py:Olmo3ForCausalLM
[ -0.00028262686100788414, 0.03526614233851433, 0.008361488580703735, -0.001301150070503354, -0.0012087186332792044, 0.02741658128798008, 0.0389065183699131, -0.007394513580948114, 0.003640376031398773, 0.026734011247754097, 0.025823917239904404, 0.0026734010316431522, 0.0009385344455949962, ...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tuple", "class", "colwi...
reformer/modeling_reformer.py:ReformerDynamicCache
[ -0.0002730890701059252, -0.03564080223441124, -0.012451434507966042, -0.017706166952848434, -0.0011137750698253512, 0.012280084192752838, -0.003012904431670904, -0.06991080194711685, -0.0020990376360714436, 0.007025350350886583, 0.004740683827549219, -0.002070479327812791, -0.002327504334971...
[ "Any", "ModelDynamicCache", "None", "__len__", "_distributed_cache_data", "_seen_tokens", "and", "append", "beam_idx", "buckets", "buckets_cache", "cache_kwargs", "cat", "class", "def", "device", "dim", "else", "for", "get_seq_length", "get_start_idx", "if", "in", "inde...
reformer/modeling_reformer.py:_stable_argsort
[ -0.0002143497549695894, -0.003611574647948146, 0.03292412310838699, 0.00009317652438767254, -0.0007699092966504395, 0.0010428772075101733, 0.034715909510850906, -0.019149744883179665, 0.005879307631403208, 0.0904853418469429, -0.005739323794841766, 0.0037795547395944595, -0.00337360263802111...
[ "_stable_argsort", "arange", "argsort", "def", "device", "dim", "expand", "return", "scale_offset", "scaled_vector", "shape", "torch", "vector", "view" ]
reformer/modeling_reformer.py:_get_least_common_mult_chunk_len
[ -0.00018420838750898838, 0.011902695521712303, 0.047837499529123306, 0.004761078394949436, -0.0007864281069487333, 0.027999674901366234, 0.030153494328260422, -0.06710853427648544, 0.013886477798223495, 0.05055811628699303, 0.028566468507051468, -0.0009139569592662156, 0.00013815629063174129...
[ "NotImplementedError", "Only", "Select", "_get_least_common_mult_chunk_len", "and", "attn", "attn_layers", "attn_types", "attn_types_set", "but", "config", "def", "elif", "else", "exist", "f", "from", "if", "layer", "lcm", "len", "local", "local_attn_chunk_length", "lsh...
reformer/modeling_reformer.py:_get_min_chunk_len
[ -0.00011772170546464622, 0.03328537195920944, 0.043630827218294144, 0.0004884572117589414, -0.00040236226050183177, 0.030811460688710213, 0.03238577023148537, -0.06387193500995636, 0.010176777839660645, 0.048803552985191345, 0.018666796386241913, 0.00604421878233552, 0.003767094574868679, ...
[ "NotImplementedError", "Only", "Select", "_get_min_chunk_len", "and", "attn", "attn_layers", "attn_types", "attn_types_set", "but", "config", "def", "elif", "else", "exist", "f", "from", "if", "layer", "len", "local", "local_attn_chunk_length", "lsh", "lsh_attn_chunk_le...
reformer/modeling_reformer.py:AxialPositionEmbeddings
[ -0.0002377346099819988, 0.022765323519706726, -0.0012655345490202308, -0.011382661759853363, -0.001251234789378941, 0.011840255931019783, 0.025625288486480713, -0.014757419936358929, 0.004003951326012611, 0.0060345265083014965, 0.004232748411595821, 0.01990535669028759, 0.0005326684913598001...
[ "Got", "If", "Make", "ModelPositionEmbeddings", "Model_pos_embd_dim", "Model_pos_embds", "Model_pos_embds_dim", "Model_pos_shape", "Module", "Parameter", "ParameterList", "True", "ValueError", "You", "__init__", "_get_least_common_mult_chunk_len", "append", "at", "ax_shape", "a...
reformer/modeling_reformer.py:PositionEmbeddings
[ 0.00009191793651552871, 0.02613297663629055, 0.023417862132191658, 0.00028459206805564463, 0.000293430348392576, 0.047740764915943146, 0.03891663998365402, -0.000749484752304852, 0.011256412602961063, -0.03371267020702362, 0.036654043942689896, -0.0036484349984675646, 0.001661593560129404, ...
[ "Embedding", "ModelEmbeddings", "Model_embeddings", "Model_ids", "Module", "__init__", "class", "config", "def", "dropout", "embedding", "forward", "functional", "hidden_dropout_prob", "hidden_size", "max_Model_embeddings", "nn", "p", "return", "self", "super", "training" ]
reformer/modeling_reformer.py:ReformerEmbeddings
[ -0.0003125129733234644, 0.023700406774878502, 0.009942609816789627, -0.009248939342796803, -0.0014740496408194304, 0.04578224942088127, 0.023700406774878502, 0.005289237014949322, 0.004855692852288485, -0.004797887057065964, 0.01630125567317009, 0.021619394421577454, -0.000643090286757797, ...
[ "AxialPositionEmbeddings", "Embedding", "Length", "ModelEmbeddings", "Module", "None", "PositionEmbeddings", "Sequence", "ValueError", "__init__", "arange", "axial_pos_embds", "be", "class", "config", "def", "device", "dropout", "dtype", "else", "embeddings", "equal", "ex...
reformer/modeling_reformer.py:EfficientAttentionMixin
[ -0.000262076297076419, 0.004164888057857752, 0.031732480973005295, -0.024026021361351013, -0.0008570603094995022, 0.003045751480385661, 0.021419424563646317, -0.04329216852784157, 0.003272412111982703, 0.023119378834962845, -0.013372973538935184, 0.019152818247675896, -0.002082444028928876, ...
[ "ModelAttentionMixin", "None", "_merge_hidden_size_dims", "_split_hidden_size_dim", "_split_seq_length_dim_to", "and", "append", "attn_head_size", "batch_size", "but", "cat", "class", "def", "dim", "dim_factor_1", "dim_factor_2", "elif", "else", "for", "i", "if", "in", "i...
reformer/modeling_reformer.py:LSHSelfAttention
[ -0.0002747849212028086, -0.009425302036106586, 0.030804645270109177, -0.005747135262936354, -0.0012787376763299108, 0.030804645270109177, 0.0496552512049675, -0.01793106272816658, 0.016781635582447052, 0.017356349155306816, 0.004655179567635059, 0.013333354145288467, 0.0015732783358544111, ...
[ "At", "EfficientAttentionMixin", "False", "If", "Linear", "Make", "Model", "ModelOutput", "Module", "None", "ReverseSort", "Setting", "The", "There", "True", "__init__", "_attend", "_compute_attn_mask", "_expand_to_indices_in_relevant_chunk", "_gather_by_expansion", "_get_rel...
reformer/modeling_reformer.py:ReverseSort
[ -0.000047009951231302693, 0.008858763612806797, -0.027523454278707504, 0.00341257406398654, -0.00010054905578726903, 0.015154615044593811, 0.02986350655555725, 0.0038722269237041473, 0.0075773075222969055, 0.04546384513378143, 0.008580186404287815, 0.006072989199310541, -0.001511282869614660...
[ "Function", "ModelSort", "None", "backward", "class", "ctx", "def", "expand", "expanded_sort_indices", "expanded_undo_sort_indices", "forward", "gather", "grad_logits", "grad_out_vectors", "logits", "no_grad", "out_vectors", "return", "shape", "sorted_bucket_idx", "staticmeth...
reformer/modeling_reformer.py:LocalSelfAttention
[ -0.00014444172848016024, 0.016695713624358177, 0.032719116657972336, -0.01535109244287014, -0.0006688089924864471, 0.010532866232097149, 0.03002987429499626, -0.036304771900177, 0.0012115597492083907, 0.01882469654083252, 0.012101591564714909, 0.03227090835571289, 0.0015547183575108647, -0...
[ "EfficientAttentionMixin", "False", "If", "Linear", "ModelSelfAttention", "ModelSelfAttentionOutput", "Model_attention_probs_dropout_prob", "Model_attn_chunk_length", "Model_num_chunks_after", "Model_num_chunks_before", "Module", "None", "There", "True", "__init__", "_compute_attn_mask...
reformer/modeling_reformer.py:ReformerSelfOutput
[ -0.00013615239004138857, 0.05306956544518471, 0.04070165753364563, 0.009894326329231262, -0.0005832592723891139, 0.02675965428352356, 0.034855011850595474, -0.02608504146337509, -0.00022838465520180762, 0.021924925968050957, 0.004975271876901388, 0.006071518175303936, 0.0007870486588217318, ...
[ "Linear", "ModelSelfOutput", "Module", "__init__", "all_head_size", "attention_head_size", "class", "config", "def", "dense", "dropout", "forward", "functional", "hidden_dropout_prob", "hidden_size", "hidden_states", "nn", "num_attention_heads", "p", "return", "self", "supe...
reformer/modeling_reformer.py:ReformerAttention
[ 0, 0.021851127967238426, 0.02140289917588234, -0.00549079617485404, 0, 0.04101288691163063, 0.024876669049263, -0.03361712023615837, 0.006163138896226883, 0.006611366756260395, 0.018825586885213852, 0.026669582352042198, 0.004426253959536552, -0.00593902450054884, -0.00795605219900608, ...
[ "AttentionOutput", "False", "LSHSelfAttention", "LayerNorm", "LocalSelfAttention", "ModelAttention", "ModelSelfOutput", "Module", "None", "NotImplementedError", "Only", "Select", "__init__", "and", "attention_mask", "attention_output", "attention_probs", "attn", "attn_layers", ...
reformer/modeling_reformer.py:ReformerFeedForwardDense
[ -0.0002692072303034365, 0.03629631549119949, 0.042039401829242706, 0.0080977538600564, -0.0010552923195064068, 0.021249424666166306, 0.03951244428753853, -0.02871543914079666, -0.001809072564356029, -0.010911867022514343, 0.02814112976193428, -0.013381394557654858, 0.002828470664098859, -0...
[ "ACT2FN", "Linear", "ModelFeedForwardDense", "Module", "__init__", "act_fn", "class", "config", "def", "dense", "dropout", "else", "feed_forward_size", "forward", "functional", "hidden_act", "hidden_dropout_prob", "hidden_size", "hidden_states", "if", "isinstance", "nn", ...
reformer/modeling_reformer.py:ReformerFeedForwardOutput
[ -0.00023242719180416316, 0.04897419735789299, 0.05080500990152359, 0.009096842259168625, -0.0011371052823960781, 0.014760914258658886, 0.0492030493915081, -0.040506694465875626, 0.0006650993600487709, -0.00023600299027748406, 0.01750713214278221, -0.0015375952934846282, 0.003361254697665572,...
[ "Linear", "ModelFeedForwardOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "feed_forward_size", "forward", "functional", "hidden_dropout_prob", "hidden_size", "hidden_states", "nn", "p", "return", "self", "super", "training" ]
reformer/modeling_reformer.py:ChunkReformerFeedForward
[ 0.00003216392724425532, 0.05639702454209328, 0.029100865125656128, 0.02481469139456749, -0.00009208577102981508, 0.034966155886650085, 0.030228804796934128, -0.030905568972229958, 0.007726392243057489, -0.0028198512736707926, 0.020190134644508362, 0.013648079708218575, 0.00704962806776166, ...
[ "LayerNorm", "ModelFeedForwardDense", "ModelFeedForwardOutput", "ModelModelFeedForward", "Model_size_feed_forward", "Module", "__init__", "apply_Modeling_to_forward", "attention_output", "class", "config", "def", "dense", "eps", "forward", "forward_Model", "hidden_size", "hidden_st...
reformer/modeling_reformer.py:ReformerLayer
[ -0.000024129911253112368, 0.01509375125169754, 0.0029628474730998278, -0.023479169234633446, 0, 0.04941805824637413, 0.008329514414072037, -0.0031585071701556444, 0.008553125895559788, 0.0031026045326143503, 0.019454168155789375, -0.016547223553061485, 0.003773437812924385, -0.021019445732...
[ "ChunkModelFeedForward", "False", "If", "ModelAttention", "ModelBackwardOutput", "ModelLayer", "ModelModel", "ModelOutput", "Module", "None", "True", "__init__", "_init_attention_seed", "_init_feed_forward_seed", "and", "assert", "attention", "attention_mask", "attention_probs", ...
reformer/modeling_reformer.py:_ReversibleFunction
[ -0.00024668718106113374, -0.0053667775355279446, 0.024193091318011284, -0.022716518491506577, -0.0011642215540632606, 0.025783248245716095, 0.015447232872247696, -0.009711311198771, 0.0014907715376466513, 0.00857548601925373, 0.006758164148777723, -0.0009796498343348503, -0.00153336499352008...
[ "Function", "ModelBackwardOutput", "None", "True", "_ReversibleFunction", "after", "all_attentions", "all_buckets", "all_hidden_states", "assert", "attention_mask", "attn_output", "backpropagation", "backward", "backward_pass", "be", "buckets", "cat", "chunk", "class", "ctx",...
reformer/modeling_reformer.py:ReformerEncoder
[ -0.00018442673899699003, 0.009419950656592846, 0.03790678828954697, 0.004766721744090319, -0.0008370136492885649, 0.04108460247516632, 0.004511361476033926, -0.03540993481874466, 0.006185389123857021, 0.013902938924729824, 0.009419950656592846, -0.015094619244337082, -0.00037240012898109853,...
[ "False", "LayerNorm", "ModelDynamicCache", "ModelEncoder", "ModelEncoderOutput", "ModelLayer", "Module", "ModuleList", "None", "_ReversibleFunction", "__init__", "all_attentions", "all_hidden_states", "and", "apply", "attention_mask", "cat", "class", "config", "def", "dim", ...
reformer/modeling_reformer.py:ReformerOnlyLMHead
[ -0.0002797181368805468, 0.01948821172118187, 0.020621245726943016, 0.014162943698465824, -0.0010976281482726336, 0.015749193727970123, 0.043281957507133484, -0.035124100744724274, 0.003271640045568347, 0.009347543120384216, 0.005608526058495045, 0.010593881830573082, 0.0013808869989588857, ...
[ "Linear", "ModelOnlyLMHead", "Module", "Parameter", "__init__", "apply_chunking_to_forward", "bias", "chunk_size_lm_head", "class", "config", "decoder", "def", "forward", "forward_chunk", "hidden_size", "hidden_states", "nn", "return", "self", "seq_len_dim", "super", "torch...
reformer/modeling_reformer.py:ReformerPreTrainedModel
[ -0.00021534133702516556, 0.036447856575250626, 0.006862448062747717, 0.009168913587927818, -0.0011603516759350896, 0.03462546318769455, 0.03622005507349968, -0.013269297778606415, 0.0038725845515727997, 0.010763507336378098, -0.0012315388303250074, 0.007631269749253988, -0.001964767230674624...
[ "AxialPositionEmbeddings", "DUMMY_INPUTS", "DUMMY_MASK", "LSHSelfAttention", "LocalSelfAttention", "Model", "ModelConfig", "ModelPreTrainedModel", "PreTrainedModel", "_init_weights", "attention_mask", "axial_norm_std", "base_model_prefix", "class", "config", "constant_", "def", "du...
reformer/modeling_reformer.py:ReformerModelOutput
[ -0.0000896177880349569, 0.007872307673096657, 0.021929999813437462, 0.0026428462006151676, -0.0007626298465766013, 0.03823692351579666, 0.038911692798137665, -0.029240000993013382, 0.020580461248755455, -0.010121538303792477, 0.015519692562520504, 0.021480154246091843, 0.00014760576596017927...
[ "FloatTensor", "ModelModelOutput", "ModelOutput", "None", "attentions", "class", "hidden_states", "last_hidden_state", "past_buckets_states", "r", "torch" ]
reformer/modeling_reformer.py:ReformerModelWithLMHeadOutput
[ -0.00014589549391530454, -0.0050623975694179535, 0.01856212504208088, -0.009224813431501389, -0.000829670752864331, 0.05107396841049194, 0.04049918055534363, -0.014624704606831074, 0.017774641513824463, -0.019462106749415398, 0.021374568343162537, 0.011362270452082157, 0.000622253050096333, ...
[ "FloatTensor", "ModelModelWithLMHeadOutput", "ModelOutput", "None", "attentions", "class", "hidden_states", "logits", "loss", "past_buckets_states", "r", "torch" ]
reformer/modeling_reformer.py:ReformerModel
[ -0.00012219026393722743, 0.007595136296004057, 0.01052067056298256, -0.0019128491403535008, -0.0004184357530903071, 0.04230772331357002, 0.018228325992822647, -0.029480380937457085, 0.007088793907314539, 0.003966348711401224, 0.020816298201680183, 0.0007243509753607213, 0.0012869535712525249...
[ "If", "Input", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelModelOutput", "ModelPreTrainedModel", "None", "Please", "Select", "True", "ValueError", "__init__", "_get_least_common_mult_chunk_len", "_get_min_chunk_len", "_pad_to_mult_of_chunk_length", "a", "all_attentions",...
reformer/modeling_reformer.py:ReformerModelWithLMHead
[ -0.000245878123678267, 0.032151658087968826, 0.0027453440707176924, -0.004075562581419945, -0.001103798160329461, 0.03894426301121712, 0.015962619334459305, -0.03305733948945999, -0.0019811762031167746, 0.007981309667229652, 0.015849409624934196, -0.005858621094375849, 0.001783058512955904, ...
[ "False", "GenerationMixin", "If", "Model", "ModelModel", "ModelModelWithLMHead", "ModelModelWithLMHeadOutput", "ModelOnlyLMHead", "ModelPreTrainedModel", "Model_outputs", "None", "True", "Warning", "__init__", "and", "assert", "attention_mask", "attentions", "attn_layers", "aut...
reformer/modeling_reformer.py:ReformerForMaskedLM
[ -0.00008672988769831136, 0.04083539545536041, 0.012551863677799702, -0.012607649900019169, -0.0002771869767457247, 0.040389109402894974, 0.015954812988638878, -0.007810048293322325, 0.00786583498120308, -0.000557860592380166, 0.019971409812569618, 0.02878560684621334, -0.0008088978938758373,...
[ "CrossEntropyLoss", "False", "If", "MaskedLMOutput", "Model", "ModelForMaskedLM", "ModelModel", "ModelOnlyLMHead", "ModelPreTrainedModel", "Model_outputs", "None", "__init__", "assert", "attention", "attention_mask", "attentions", "auto_docstring", "bi", "bias", "class", "con...
reformer/modeling_reformer.py:ReformerForSequenceClassification
[ -0.0003517685108818114, 0.028227191418409348, -0.0011999413836747408, 0.013999315910041332, -0.0012070838129147887, 0.019999021664261818, 0.021027542650699615, 0.00965667050331831, -0.0027284380048513412, 0.007428208366036415, 0.04936901479959488, 0.005371165927499533, 0.00038212418439798057...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "MSELoss", "Model", "ModelClassificationHead", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "None", "SequenceClassifierOutput", "True", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "class", ...
reformer/modeling_reformer.py:ReformerClassificationHead
[ -0.00036678771721199155, 0.02416483871638775, 0.04050487279891968, 0.003941169939935207, -0.0012801610864698887, 0.012197489850223064, 0.04971052706241608, 0.0021863426081836224, -0.005350785795599222, 0.00909058190882206, 0.02658132277429104, 0.0030925238970667124, -0.0006580603658221662, ...
[ "Dropout", "Linear", "ModelClassificationHead", "Module", "None", "__init__", "class", "classifier_dropout", "config", "def", "dense", "dropout", "else", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "if", "is", "kwargs", "nn", "not", "num_labels", "...
reformer/modeling_reformer.py:ReformerForQuestionAnswering
[ -0.00016852373664733022, 0.017414119094610214, 0.007415044587105513, 0.01404364500194788, -0.0005757894250564277, 0.044265568256378174, 0.039322204887866974, 0.02437976747751236, 0.007246520835906267, 0.03145776316523552, 0.017751166597008705, 0.016740024089813232, 0.0016431064577773213, -...
[ "CrossEntropyLoss", "False", "Linear", "Model", "ModelForQuestionAnswering", "ModelModel", "ModelPreTrainedModel", "Model_outputs", "None", "QuestionAnsweringModelOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "clamp", "class", "config", "contiguous...
visual_bert/modeling_visual_bert.py:VisualBertEmbeddings
[ -0.000199052898096852, 0.03594326600432396, 0.017289165407419205, 0.003099537920206785, -0.0009952645050361753, 0.04572529345750809, 0.0009383922442793846, -0.037535689771175385, 0.006113767623901367, -0.002744086319580674, 0.036625731736421585, 0.04231295734643936, -0.0008388658170588315, ...
[ "Dropout", "Embedding", "False", "Found", "LayerNorm", "Linear", "Model", "ModelEmbeddings", "Model_embedding_dim", "Model_embeddings", "Model_embeds", "Model_position_embeddings", "Model_position_ids", "Model_projection", "Model_token_type_embeddings", "Model_token_type_ids", "Modul...
visual_bert/modeling_visual_bert.py:VisualBertSelfAttention
[ 0.00001141523080150364, 0.03803183510899544, 0.0398215688765049, -0.0025587594136595726, -0.0003897564020007849, 0.011856983415782452, 0.02527998387813568, -0.008277516812086105, 0.0029922104440629482, 0.02047007530927658, 0.012136629782617092, 0.00950795877724886, -0.000302367057884112, -...
[ "Dropout", "False", "Linear", "ModelSelfAttention", "Module", "None", "The", "ValueError", "_", "__init__", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_mask", "attention_probs", "attention_probs_dropout_prob", "attention_scores", "batch_size", ...
visual_bert/modeling_visual_bert.py:VisualBertSelfOutput
[ -0.00012168083776487038, 0.04875698313117027, 0.03859927877783775, 0.020879726856946945, -0.0006630723946727812, 0.05620596557855606, 0.02370131015777588, -0.01952536590397358, 0.0035551965702325106, 0.017155233770608902, 0.01365646906197071, 0.003512872848659754, 0.0036962758749723434, -0...
[ "Dropout", "LayerNorm", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
visual_bert/modeling_visual_bert.py:VisualBertAttention
[ 0.0001855551963672042, 0.03709689900279045, 0.04410912096500397, 0.0014561665011569858, 0.0006185172824189067, 0.04003750905394554, 0.038227904587984085, -0.020244954153895378, 0.016060244292020798, -0.004043336026370525, 0.01900085248053074, 0.023637963458895683, 0.0037323101423680782, -0...
[ "False", "ModelAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "__init__", "attention_mask", "attention_output", "class", "config", "def", "forward", "hidden_states", "nn", "output", "output_attentions", "outputs", "return", "self", "self_outputs", "s...
visual_bert/modeling_visual_bert.py:VisualBertIntermediate
[ -0.00025367451598867774, 0.02240910567343235, 0.04047359153628349, 0.012862369418144226, -0.0009432404185645282, 0.03635763004422188, 0.03452831506729126, -0.018864808604121208, -0.001329111517407, -0.003772961674258113, 0.023209432139992714, -0.02103712037205696, -0.0013719861162826419, 0...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
visual_bert/modeling_visual_bert.py:VisualBertOutput
[ -0.0002591986849438399, 0.03935529664158821, 0.0507957898080349, 0.030660521239042282, -0.0012513039400801063, 0.04988054931163788, 0.03798243775963783, -0.023681821301579475, 0.002259497530758381, 0.014758236706256866, 0.011040075682103634, 0.00267421524040401, 0.0013013561256229877, 0.00...
[ "Dropout", "LayerNorm", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "layer_norm_eps", "nn", "return", "self", "su...
visual_bert/modeling_visual_bert.py:VisualBertLayer
[ -0.000056430933909723535, 0.01371840387582779, 0.028668664395809174, 0.008343028835952282, 0, 0.037179674953222275, 0.026540910825133324, -0.004423485137522221, 0.006103289779275656, 0.0013578420039266348, 0.009294918738305569, 0.00713916914537549, 0.002449715044349432, -0.0009658876224420...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "__init__", "apply_chunking_to_forward", "attention", "attention_mask", "attention_output", "chunk_size_feed_forward", "class", "config", "def", "feed_forward_chunk", ...
visual_bert/modeling_visual_bert.py:VisualBertEncoder
[ -0.0000817262553027831, 0.011192102916538715, 0.017772383987903595, 0.025758707895874977, -0.00029878414352424443, 0.0384693369269371, 0.011417069472372532, -0.03127039596438408, 0.0049211508594453335, 0.007930083200335503, 0.019797086715698242, -0.0018559767631813884, -0.0015677380142733455...
[ "BaseModelOutput", "False", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "None", "True", "_", "__init__", "all_hidden_states", "all_self_attentions", "attention_mask", "attentions", "class", "config", "def", "else", "enumerate", "for", "forward", "gradient_checkpoi...
visual_bert/modeling_visual_bert.py:VisualBertPooler
[ -0.0002915378427132964, 0.011618588119745255, 0.03685896843671799, 0.022893769666552544, -0.001151842763647437, 0.03296703100204468, 0.037087906152009964, -0.0066105760633945465, -0.0011303798528388143, 0.0015954095870256424, 0.014022434130311012, -0.016025640070438385, -0.001244848710484802...
[ "Linear", "ModelPooler", "Module", "Tanh", "__init__", "activation", "class", "config", "def", "dense", "first_token_tensor", "forward", "hidden_size", "hidden_states", "nn", "pooled_output", "return", "self", "super" ]
visual_bert/modeling_visual_bert.py:VisualBertPredictionHeadTransform
[ -0.0002919524849858135, 0.041955187916755676, 0.0442478209733963, 0.03209686279296875, -0.0013899088371545076, 0.026136018335819244, 0.029574967920780182, -0.011348534375429153, -0.0021780014503747225, 0.020519066601991653, 0.01662158966064453, 0.00664863595739007, 0.0001226916938321665, 0...
[ "ACT2FN", "LayerNorm", "Linear", "ModelPredictionHeadTransform", "Module", "__init__", "class", "config", "def", "dense", "else", "eps", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "isinstance", "layer_norm_eps", "nn", "return", "self", "str", "super"...
visual_bert/modeling_visual_bert.py:VisualBertLMPredictionHead
[ -0.00033743318635970354, 0.03515048697590828, 0.03377203643321991, 0.018723953515291214, -0.0015794745413586497, 0.024926980957388878, 0.056746214628219604, -0.029292073100805283, 0.00021807517623528838, 0.004077916033565998, 0.019528048112988472, 0.008155832067131996, -0.0002458954695612192...
[ "Linear", "ModelLMPredictionHead", "ModelPredictionHeadTransform", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "forward", "hidden_size", "hidden_states", "nn", "return", "self", "super", "torch", "transform", "vocab_size", "zeros" ]
visual_bert/modeling_visual_bert.py:VisualBertPreTrainingHeads
[ -0.0003449733485467732, 0.013582197017967701, 0.02519931085407734, 0.022887447848916054, -0.0015316094504669309, 0.047855570912361145, 0.02820473350584507, -0.008033725433051586, 0.0029187274631112814, 0.015027112327516079, 0.019419651478528976, 0.011096944101154804, -0.00037748392787761986,...
[ "Linear", "ModelLMPredictionHead", "ModelPreTrainingHeads", "Module", "__init__", "class", "config", "def", "forward", "hidden_size", "nn", "pooled_output", "prediction_scores", "predictions", "return", "self", "seq_relationship", "seq_relationship_score", "sequence_output", "s...
visual_bert/modeling_visual_bert.py:VisualBertPreTrainedModel
[ -0.00023175595561042428, 0.04189298674464226, 0.0029296784196048975, 0.021625839173793793, -0.0009907125495374203, 0.029664762318134308, 0.010756307281553745, -0.0037930135149508715, -0.0009270238224416971, 0.011775325983762741, 0.007359578739851713, 0.0018257416086271405, -0.002306944923475...
[ "Embedding", "LayerNorm", "Linear", "Model", "ModelConfig", "ModelEmbeddings", "ModelLMPredictionHead", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "_init_weights", "and", "arange", "base_model_prefix", "bias", "class", "config", "copy_", "def", "elif", "expa...