Schema (one record per source code unit):

identifier   string   length 24–102
embedding    list     length 2.56k (constant across rows)
tokens       list     length 4–448

Each record below spans three lines in that order: the identifier (file path plus class or function name), the embedding vector, and the sorted list of source tokens; the two lists are shown truncated.
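The schema pairs each class or function from a `modeling_*.py` file with a fixed-size embedding and that unit's token vocabulary. As a minimal sketch of how such records could be queried, the snippet below assumes the rows are exported as JSON Lines to a hypothetical `embeddings.jsonl` (field names taken from the schema above; the file name and helper functions are illustrative, not part of the dataset) and retrieves the nearest neighbours of one entry by cosine similarity:

```python
# Minimal sketch (not part of the dataset): nearest-neighbour lookup over
# rows shaped like the records below. Assumes a hypothetical file
# embeddings.jsonl with one JSON object per row and the schema's field
# names: identifier, embedding, tokens.
import json

import numpy as np


def load_records(path):
    with open(path) as f:
        return [json.loads(line) for line in f]


def cosine_neighbors(records, query_id, top_k=5):
    ids = [r["identifier"] for r in records]
    mat = np.asarray([r["embedding"] for r in records], dtype=np.float32)
    mat /= np.linalg.norm(mat, axis=1, keepdims=True)  # unit-normalise rows
    sims = mat @ mat[ids.index(query_id)]              # cosine similarities
    sims[ids.index(query_id)] = -np.inf                # drop the query itself
    return [(ids[i], float(sims[i])) for i in np.argsort(-sims)[:top_k]]


if __name__ == "__main__":
    records = load_records("embeddings.jsonl")  # hypothetical export path
    for ident, score in cosine_neighbors(
        records, "arcee/modeling_arcee.py:ArceeDecoderLayer"
    ):
        print(f"{score:.3f}  {ident}")
```

On this data, a query for `arcee/modeling_arcee.py:ArceeDecoderLayer` should surface `ernie4_5/modeling_ernie4_5.py:Ernie4_5DecoderLayer` with similarity 1.0: the two rows carry identical embeddings and token lists, apparently because class names are normalised to a shared `Model*` template before tokenisation.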
arcee/modeling_arcee.py:ArceeDecoderLayer
[ -0.00015529245138168335, 0.04223954677581787, 0.012649276293814182, 0.003317611524835229, -0.0006635222816839814, 0.03862546756863594, 0.04246542602777481, -0.03501138836145401, 0.00671992776915431, -0.0017364519881084561, -0.003839958691969514, 0.021684473380446434, -0.0015670419670641422, ...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "input_layernorm", "kwa...
arcee/modeling_arcee.py:ArceePreTrainedModel
[ -0.00034975787275470793, 0.027630871161818504, 0.016904963180422783, 0.011250545270740986, -0.0018799485405907035, 0.029612833634018898, 0.03077869303524494, -0.03241089731454849, -0.00408050836995244, 0.003993069287389517, 0.006383080966770649, 0.004459412768483162, -0.004255387466400862, ...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supp...
arcee/modeling_arcee.py:ArceeModel
[ -0.00009990988473873585, 0.048900384455919266, -0.007661811541765928, -0.004140758421272039, -0.000760547467507422, 0.04078787937760353, 0.04033718630671501, -0.016675706952810287, 0.012732128612697124, 0.011943412013351917, 0.016901055350899696, -0.0022112212609499693, -0.000880263280123472...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "auto_docstring", "cache_p...
arcee/modeling_arcee.py:ArceeForCausalLM
[ -0.00028262686100788414, 0.03526614233851433, 0.008361488580703735, -0.001301150070503354, -0.0012087186332792044, 0.02741658128798008, 0.0389065183699131, -0.007394513580948114, 0.003640376031398773, 0.026734011247754097, 0.025823917239904404, 0.0026734010316431522, 0.0009385344455949962, ...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tuple", "class", "colwi...
arcee/modeling_arcee.py:ArceeForSequenceClassification
[ -0.0002121782599715516, 0.015340753830969334, -0.017272552475333214, 0.0278406273573637, -0.0007244244916364551, 0.027954263612627983, 0.015227118507027626, -0.011079433374106884, 0.0032101948745548725, -0.013863496482372284, 0.02909061498939991, 0.008409005589783192, -0.003437465289607644, ...
[ "GenericForSequenceClassification", "ModelForSequenceClassification", "ModelPreTrainedModel", "class", "pass" ]
arcee/modeling_arcee.py:ArceeForQuestionAnswering
[ -0.00014039420057088137, 0.034368500113487244, 0.020329080522060394, 0.027292631566524506, -0.0005580669385381043, 0.03549165278673172, 0.04155668243765831, 0.023586224764585495, -0.001811085152439773, 0.014600996859371662, 0.004239904694259167, 0.009715278632938862, -0.003355421358719468, ...
[ "GenericForQuestionAnswering", "ModelForQuestionAnswering", "ModelPreTrainedModel", "base_model_prefix", "class", "transformer" ]
arcee/modeling_arcee.py:ArceeForTokenClassification
[ -0.00016310509818140417, 0.024394849315285683, -0.0069496952928602695, -0.024848707020282745, -0.0007446102099493146, 0.035854753106832504, 0.0356278270483017, -0.007885776460170746, 0.0015317696379497647, -0.018381234258413315, 0.040166404098272324, 0.024394849315285683, -0.0042832815088331...
[ "GenericForTokenClassification", "ModelForTokenClassification", "ModelPreTrainedModel", "class", "pass" ]
ernie4_5/modeling_ernie4_5.py:Ernie4_5RotaryEmbedding
[ -0.0003337432281114161, 0.04972226917743683, 0.0015173462452366948, -0.005018914584070444, -0.0016851297114044428, 0.04225225746631622, 0.04341944679617882, 0.0062444633804261684, -0.0016778347780928016, 0.020659253001213074, 0.0025969964917749166, -0.004260241519659758, -0.00161218037828803...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
ernie4_5/modeling_ernie4_5.py:Ernie4_5MLP
[ -0.000221405309275724, 0.027072647586464882, 0.022694943472743034, 0.026035822927951813, -0.0009432226070202887, 0.05852300301194191, 0.03386961296200752, -0.004348904360085726, -0.0004446106613613665, -0.003916893620043993, 0.02741825580596924, -0.047002725303173065, -0.0016992406453937292,...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
ernie4_5/modeling_ernie4_5.py:rotate_half
[ 0.000020891084204777144, 0.007994027808308601, 0.02882353775203228, -0.0005594412214122713, 0.00044509003055281937, 0.031300559639930725, 0.017676936462521553, -0.02251838892698288, 0.01655101589858532, 0.02612133137881756, -0.004925897810608149, -0.008894763886928558, 0.00016888792742975056...
[ "Model_half", "def", "dim", "flatten", "return", "stack", "torch", "x", "x1", "x2" ]
ernie4_5/modeling_ernie4_5.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
ernie4_5/modeling_ernie4_5.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
ernie4_5/modeling_ernie4_5.py:apply_rotary_pos_emb
[ -0.00015957531286403537, 0.02848242036998272, 0.025759002193808556, -0.0018865348538383842, -0.0007801459869369864, 0.018156124278903008, 0.04175908863544464, -0.00669507123529911, 0.01123410277068615, 0.03495053946971893, 0.006326274946331978, 0.007007129490375519, -0.0004751798405777663, ...
[ "Model_rotary_pos_emb", "cos", "def", "dim", "float", "k", "k_embed", "q", "q_embed", "repeat_interleave", "return", "rotate_half", "shape", "sin", "unsqueeze", "unsqueeze_dim" ]
ernie4_5/modeling_ernie4_5.py:Ernie4_5Attention
[ -0.00011164446186739951, 0.03240678459405899, 0.027343222871422768, -0.007651601452380419, -0.0004782250907737762, 0.03105650097131729, 0.04388418421149254, -0.005851224530488253, 0.0036007536109536886, 0.009677025489509106, 0.012658899649977684, 0.027230700477957726, -0.0009916138369590044,...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache_position", "cla...
ernie4_5/modeling_ernie4_5.py:Ernie4_5RMSNorm
[ -0.00009884802420856431, 0.04179859161376953, 0.03230918198823929, 0.053095508366823196, -0.00046952810953371227, 0.03931327164173126, 0.022028988227248192, -0.02937198430299759, 0.007964326068758965, 0.042476408183574677, 0.019995542243123055, 0.006975846365094185, 0.0028524715453386307, ...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
ernie4_5/modeling_ernie4_5.py:Ernie4_5DecoderLayer
[ -0.00015529245138168335, 0.04223954677581787, 0.012649276293814182, 0.003317611524835229, -0.0006635222816839814, 0.03862546756863594, 0.04246542602777481, -0.03501138836145401, 0.00671992776915431, -0.0017364519881084561, -0.003839958691969514, 0.021684473380446434, -0.0015670419670641422, ...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "input_layernorm", "kwa...
ernie4_5/modeling_ernie4_5.py:Ernie4_5PreTrainedModel
[ -0.00034975787275470793, 0.027630871161818504, 0.016904963180422783, 0.011250545270740986, -0.0018799485405907035, 0.029612833634018898, 0.03077869303524494, -0.03241089731454849, -0.00408050836995244, 0.003993069287389517, 0.006383080966770649, 0.004459412768483162, -0.004255387466400862, ...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supp...
ernie4_5/modeling_ernie4_5.py:Ernie4_5Model
[ -0.00009990988473873585, 0.048900384455919266, -0.007661811541765928, -0.004140758421272039, -0.000760547467507422, 0.04078787937760353, 0.04033718630671501, -0.016675706952810287, 0.012732128612697124, 0.011943412013351917, 0.016901055350899696, -0.0022112212609499693, -0.000880263280123472...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "auto_docstring", "cache_p...
ernie4_5/modeling_ernie4_5.py:Ernie4_5ForCausalLM
[ -0.00028262686100788414, 0.03526614233851433, 0.008361488580703735, -0.001301150070503354, -0.0012087186332792044, 0.02741658128798008, 0.0389065183699131, -0.007394513580948114, 0.003640376031398773, 0.026734011247754097, 0.025823917239904404, 0.0026734010316431522, 0.0009385344455949962, ...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tuple", "class", "colwi...
vjepa2/modeling_vjepa2.py:VJEPA2WithMaskedInputPredictorOutput
[ -0.00021555330022238195, 0.029553350061178207, 0.0355563722550869, -0.002467589220032096, -0.0014935408253222704, 0.054027218371629715, 0.05010216310620308, -0.014892117120325565, 0.018239958211779594, -0.002121260855346918, -0.0011183518217876554, 0.022280454635620117, -0.002626323141157627...
[ "FloatTensor", "Model", "ModelOutput", "None", "attentions", "class", "hidden_states", "last_hidden_state", "masked_hidden_state", "r", "target_hidden_state", "torch" ]
vjepa2/modeling_vjepa2.py:VJEPA2WithMaskedInputModelOutput
[ -0.00026229937793686986, 0.020098913460969925, 0.018956929445266724, -0.002640837337821722, -0.0012704568216577172, 0.037000272423028946, 0.04682133346796036, -0.026836616918444633, 0.007108848541975021, 0.002412440488114953, 0.009935257956385612, 0.019984714686870575, -0.004082591738551855,...
[ "FloatTensor", "Model", "ModelOutput", "ModelWithMaskedInputPredictorOutput", "None", "attentions", "class", "def", "hidden_states", "if", "isinstance", "last_hidden_state", "list", "masked_hidden_state", "output", "predictor_output", "r", "return", "self", "super", "to_tuple...
vjepa2/modeling_vjepa2.py:VJEPA2PatchEmbeddings3D
[ 0, 0.02370988391339779, -0.0021875191014260054, 0.005504080560058355, 0.0005574645474553108, 0.021564705297350883, 0.03454868867993355, -0.041322942823171616, 0.015354973264038563, 0.01377431396394968, 0.004544394556432962, -0.003020187607035041, -0.0013689636252820492, -0.0179517697542905...
[ "Conv3d", "Model", "Module", "__init__", "class", "config", "crop_size", "def", "flatten", "forward", "frames_per_clip", "hidden_size", "in_channels", "in_chans", "kernel_size", "nn", "num_patches", "out_channels", "patch_size", "pixel_values_videos", "proj", "return", "s...
vjepa2/modeling_vjepa2.py:VJEPA2Embeddings
[ -0.00002323625813005492, 0.023008279502391815, -0.007014719303697348, 0.011560257524251938, 0.000099082913948223, 0.012345906347036362, 0.039057958871126175, -0.04422079026699066, 0.01189696416258812, 0.004629714880138636, 0.005920423194766045, 0.0023008279968053102, -0.0015432382933795452, ...
[ "Model", "ModelPatchEmbeddings3D", "Module", "__init__", "class", "config", "def", "dtype", "embeddings", "forward", "hidden_size", "if", "nn", "num_frames", "num_patches", "patch_embeddings", "patch_size", "permute", "pixel_values_videos", "proj", "repeat", "return", "se...
vjepa2/modeling_vjepa2.py:eager_attention_forward
[ 0.0000886371053638868, 0.03635091707110405, 0.031157927587628365, -0.018965695053339005, 0.0004727312480099499, 0.02336844615638256, 0.055542394518852234, -0.019530151039361954, 0.017159437760710716, 0.008805501274764538, 0.026303613558411598, 0.02596493996679783, 0.0018627021927386522, -0...
[ "Model_attention_forward", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "key", "kwargs", "matmul", "module", "nn", "p", "query", "return", "scaling", "softmax", "to", "torch", "training", "tr...
vjepa2/modeling_vjepa2.py:rotate_queries_or_keys
[ -0.00007225864101201296, 0.011786970309913158, 0.008685136213898659, 0.020979680120944977, -0.00020620148279704154, 0.013930056244134903, 0.05865286663174629, -0.002354574156925082, 0.014663216657936573, 0.022671589627861977, 0.016919096931815147, 0.025942614302039146, -0.0003225202672183513...
[ "B", "D", "Model_queries_or_keys", "N", "arange", "cos", "def", "device", "dim", "dtype", "emb_cos", "emb_sin", "flatten", "freq", "num_heads", "omega", "pos", "repeat", "return", "sin", "size", "squeeze", "stack", "torch", "unbind", "unflatten", "unsqueeze", "x...
vjepa2/modeling_vjepa2.py:VJEPA2RopeAttention
[ -0.00017485758871771395, 0.028516875579953194, 0.016019441187381744, -0.012667855247855186, -0.0008556483080610633, 0.0270399060100317, 0.03794676065444946, -0.03022107109427452, 0.003777633886784315, 0.024085966870188713, 0.01067962683737278, 0.01511053554713726, -0.0013136508641764522, -...
[ "ALL_ATTENTION_FUNCTIONS", "Dropout", "False", "Linear", "Model", "Module", "None", "The", "ValueError", "_", "__init__", "_attn_implementation", "_get_frame_pos", "_get_height_pos", "a", "all_head_size", "apply_rotary_embeddings", "arange", "attention", "attention_head_size", ...
vjepa2/modeling_vjepa2.py:drop_path
[ 0, 0.017753854393959045, 0.03775503486394882, -0.02123720571398735, 0, 0.043598074465990067, 0.0339345820248127, -0.027304979041218758, 0.014832334592938423, 0.0017486985307186842, 0.020113544538617134, -0.04944111406803131, 0.0016574009787291288, -0.018315685912966728, -0.05034004524350...
[ "False", "Model_path", "Model_prob", "def", "device", "div", "dtype", "floor_", "if", "input", "keep_prob", "ndim", "not", "or", "output", "rand", "random_tensor", "return", "shape", "torch", "training" ]
vjepa2/modeling_vjepa2.py:VJEPA2DropPath
[ -0.00014069548342376947, 0.013704978860914707, 0.03964250162243843, -0.018009021878242493, -0.0007397573790512979, 0.034658871591091156, 0.04372001439332962, -0.025484465062618256, 0.0043323589488863945, -0.0006123350467532873, 0.0141580356284976, -0.04236084222793579, -0.0013733294326812029...
[ "Model", "Module", "None", "__init__", "class", "def", "drop_path", "drop_prob", "extra_repr", "f", "forward", "hidden_states", "nn", "p", "return", "self", "super", "training" ]
vjepa2/modeling_vjepa2.py:VJEPA2MLP
[ -0.00023544428404420614, 0.029451940208673477, 0.0415523499250412, 0.01586751826107502, -0.0005957453977316618, 0.03470306098461151, 0.03675784915685654, -0.03310489282011986, 0.0015125512145459652, -0.012043331749737263, 0.027282997965812683, -0.027168843895196915, 0.00003032236963917967, ...
[ "ACT2FN", "Linear", "Model", "Module", "__init__", "activation", "class", "config", "def", "fc1", "fc2", "forward", "hidden_act", "hidden_features", "hidden_size", "hidden_state", "in_features", "int", "mlp_ratio", "nn", "out_features", "return", "self", "super" ]
vjepa2/modeling_vjepa2.py:VJEPA2Layer
[ -0.00013465568190440536, 0.03762614354491234, 0.023431850597262383, 0.00018482153245713562, -0.0003819644916802645, 0.04979268088936806, 0.03266940638422966, 0.0036612264811992645, 0.0045061251148581505, -0.0012673476012423635, 0.011434292420744896, -0.0035485734697431326, 0.0005527043831534...
[ "False", "GradientCheckpointingLayer", "Identity", "LayerNorm", "Model", "ModelDropPath", "ModelMLP", "ModelRopeAttention", "None", "__init__", "attention", "attention_output", "class", "config", "def", "drop_path", "drop_path_rate", "else", "eps", "forward", "hidden_size", ...
vjepa2/modeling_vjepa2.py:VJEPA2Encoder
[ -0.00009500525629846379, 0.03963478282094002, 0.013793354853987694, 0.02803710661828518, -0.00010160284000448883, 0.033104050904512405, 0.03423004224896431, -0.026460722088813782, 0.003349814796820283, 0.012329570949077606, 0.022519763559103012, -0.0021253027953207493, -0.0011048759333789349...
[ "BaseModelOutput", "False", "LayerNorm", "Model", "ModelEmbeddings", "ModelLayer", "Module", "ModuleList", "None", "__init__", "all_hidden_states", "all_self_attentions", "attentions", "can_return_tuple", "class", "config", "def", "drop_path_rate", "drop_path_rates", "else", ...
vjepa2/modeling_vjepa2.py:apply_masks
[ 0.00008781287760939449, -0.0035160451661795378, 0.02530422806739807, -0.03253400698304176, 0.0005612963577732444, 0.053319625556468964, -0.0034172004088759422, -0.08269060403108597, 0.01412066351622343, -0.001715660560876131, -0.0016026953235268593, 0.014572524465620518, -0.0041232337243855,...
[ "Model_masks", "all_masked_tensors", "cat", "def", "device", "dim", "for", "gather", "in", "index", "mask", "mask_keep", "masks", "repeat", "return", "size", "tensor", "to", "torch", "unsqueeze" ]
vjepa2/modeling_vjepa2.py:VJEPA2PredictorEmbeddings
[ -0.00022927654208615422, 0.028778649866580963, 0.005118732340633869, -0.00807622168213129, -0.0009810903575271368, 0.04026735946536064, 0.015128697268664837, -0.0398123636841774, 0.007621223572641611, -0.011488710530102253, 0.011090586893260479, 0.019564932212233543, -0.0030854579526931047, ...
[ "B", "Linear", "Model", "Module", "Parameter", "__init__", "apply_masks", "cat", "class", "cm", "config", "context", "context_mask", "crop_size", "def", "dim", "else", "embeddings", "forward", "frames_per_clip", "hidden_size", "hidden_states", "if", "len", "mask_index...
vjepa2/modeling_vjepa2.py:VJEPA2Predictor
[ -0.0001844085636548698, 0.040206361562013626, 0.012310374528169632, 0.004997560288757086, -0.0009458659333176911, 0.027444230392575264, 0.027670107781887054, -0.021797269582748413, 0.004461098928004503, 0.002188197337090969, -0.005166969262063503, 0.012988009490072727, -0.0037834637332707644...
[ "BaseModelOutput", "D", "False", "LayerNorm", "Linear", "Model", "ModelEmbeddings", "ModelLayer", "Module", "ModuleList", "N_ctxt", "None", "_", "__init__", "all_hidden_states", "all_self_attentions", "apply_masks", "argsort", "attentions", "can_return_tuple", "class", "con...
vjepa2/modeling_vjepa2.py:VJEPA2PoolerSelfAttention
[ -0.00018812237249221653, 0.03952699527144432, 0.03748249635100365, 0.004997665993869305, -0.0007027967949397862, 0.02055858075618744, 0.03702816367149353, -0.009370624087750912, -0.002214874839410186, 0.034983664751052856, 0.008291582576930523, -0.00371985393576324, -0.002725999802350998, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "Model", "Module", "None", "ValueError", "__init__", "_attn_implementation", "and", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "batch_size", "be", "by", "class", "config", "contigu...
vjepa2/modeling_vjepa2.py:VJEPA2PoolerCrossAttention
[ -0.00019428564701229334, 0.037473179399967194, 0.03701895847916603, 0.007608190644532442, -0.0005535810487344861, 0.022597461938858032, 0.04246960207819939, -0.002114963484928012, 0.0019446307560428977, 0.03520207479596138, 0.008289521560072899, 0.0019162420649081469, -0.002228518482297659, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "Model", "Module", "None", "ValueError", "__init__", "_attn_implementation", "and", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "batch_size", "be", "by", "class", "config", "contigu...
vjepa2/modeling_vjepa2.py:VJEPA2PoolerSelfAttentionLayer
[ -0.00005305796366883442, 0.024247050285339355, 0.024247050285339355, 0.01784852333366871, 0, 0.040860772132873535, 0.031206851825118065, 0.007633330766111612, 0.005556616000831127, 0.00256783002987504, 0.011899015866219997, -0.004013111349195242, 0.001304962788708508, -0.014368622563779354...
[ "False", "GradientCheckpointingLayer", "LayerNorm", "Model", "ModelMLP", "ModelPoolerSelfAttention", "__init__", "attention_mask", "attn_weights", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "if", "layer_norm1", "layer_norm2", "layer_norm_eps", ...
vjepa2/modeling_vjepa2.py:VJEPA2PoolerCrossAttentionLayer
[ -0.00010809469677042216, 0.02936645969748497, 0.03004414774477482, 0.025413282215595245, -0.0005506211309693754, 0.0467604398727417, 0.03636923059821129, 0.020443573594093323, 0.006720401346683502, 0.014909125864505768, 0.009600573219358921, 0.007680458948016167, 0.0013200788525864482, -0....
[ "False", "GradientCheckpointingLayer", "LayerNorm", "Model", "ModelMLP", "ModelPoolerCrossAttention", "None", "__init__", "attention_mask", "attn_weights", "class", "config", "cross_attn", "def", "eps", "forward", "hidden_size", "hidden_state", "if", "layer_norm1", "layer_nor...
vjepa2/modeling_vjepa2.py:VJEPA2AttentivePooler
[ 0.000055787808378227055, 0.026539184153079987, 0.01990438811480999, 0.014394133351743221, 0.00028640672098845243, 0.026651637628674507, 0.04003368318080902, 0.003654760541394353, 0.008715198375284672, 0.012651094235479832, 0.011357870884239674, -0.002656729659065604, 0.001714926096610725, ...
[ "Model", "ModelPoolerCrossAttentionLayer", "ModelPoolerSelfAttentionLayer", "Module", "ModuleList", "None", "Parameter", "_", "__init__", "attention_mask", "class", "config", "cross_attention_layer", "def", "for", "forward", "hidden_size", "hidden_state", "in", "layer", "nn",...
vjepa2/modeling_vjepa2.py:VJEPA2PreTrainedModel
[ -0.00010589890734991059, 0.045930370688438416, -0.008401896804571152, 0.006861548870801926, -0.0004043412918690592, 0.03002277761697769, 0.03719239681959152, -0.017699996009469032, 0.0016593746840953827, 0.01187468133866787, 0.010418351739645004, -0.0009382118005305529, -0.001428322517313063...
[ "Conv2d", "Conv3d", "LayerNorm", "Linear", "Model", "ModelAttentivePooler", "ModelConfig", "ModelLayer", "ModelPoolerCrossAttentionLayer", "ModelPoolerSelfAttentionLayer", "ModelPredictorEmbeddings", "None", "PreTrainedModel", "True", "_init_weights", "_no_split_modules", "_supports_...
vjepa2/modeling_vjepa2.py:VJEPA2Model
[ -0.00015351359616033733, 0.049180496484041214, -0.0036211665719747543, 0.012238981202244759, -0.0006561609916388988, 0.034583546221256256, 0.04289257898926735, -0.027285069227218628, 0.0037615217734128237, 0.010891570709645748, 0.02784649096429348, 0.03009217418730259, -0.0011789844138547778...
[ "B", "False", "Model", "ModelEncoder", "ModelPreTrainedModel", "ModelPredictor", "ModelWithMaskedInputModelOutput", "ModelWithMaskedInputPredictorOutput", "N", "None", "True", "__init__", "and", "apply_masks", "arange", "attentions", "auto_docstring", "can_return_tuple", "class",...
vjepa2/modeling_vjepa2.py:VJEPA2ForVideoClassification
[ -0.00013613977353088558, 0.042894501239061356, 0.005250108428299427, 0.014744984917342663, -0.0001797743170754984, 0.03351132944226265, 0.04490518197417259, -0.002066531917080283, 0.006115817464888096, 0.00982998963445425, 0.03976677730679512, 0.01351623609662056, 0.0015219729393720627, -0...
[ "ImageClassifierOutput", "Linear", "Model", "ModelAttentivePooler", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "attentions", "auto_docstring", "can_return_tuple", "class", "classifier", "config", "def", "forward", "hidden_size", "hidden_states", "if", "...
whisper/modeling_whisper.py:sinusoids
[ -0.00021466238831635565, 0.012716528959572315, 0.021799763664603233, 0.021345602348446846, -0.0005783466040156782, 0.01964249648153782, 0.05518065392971039, -0.011808205395936966, 0.01044572051614523, 0.010559260845184326, 0.026227841153740883, -0.014135784469544888, -0.0017882619285956025, ...
[ "Model", "Number", "ValueError", "arange", "be", "by", "cat", "channels", "cos", "def", "dim", "divisible", "embeddings", "exp", "f", "for", "got", "has", "if", "inv_timescales", "length", "log", "log_timescale_increment", "math", "max_timescale", "of", "positiona...
whisper/modeling_whisper.py:shift_tokens_right
[ -0.0001357423752779141, 0.026739485561847687, 0.00459761219099164, -0.04174518957734108, -0.0005782272783108056, 0.05189942196011543, 0.04039129242300987, -0.057089366018772125, 0.006572046782821417, 0.007784913759678602, 0.020759768784046173, -0.009872172959148884, -0.0006698974757455289, ...
[ "Model_tokens_right", "Modeled_input_ids", "None", "clone", "decoder_start_token_id", "def", "if", "input_ids", "is", "masked_fill_", "new_zeros", "pad_token_id", "return", "shape" ]
whisper/modeling_whisper.py:_compute_mask_indices
[ 0, -0.008906038478016853, 0.00551726296544075, -0.014171244576573372, 0, 0.024309566244482994, -0.0006616514874622226, -0.05265205353498459, 0.013331051915884018, -0.0200525913387537, 0.040777336806058884, 0.030695026740431786, -0.006189417093992233, -0.012434846721589565, 0.005489256698...
[ "False", "None", "ValueError", "_", "_compute_mask_indices", "and", "append", "arange", "array", "attention_mask", "batch_size", "be", "bigger", "bool", "broadcast_to", "but", "choice", "compute_num_masked_span", "concatenate", "def", "detach", "dtype", "dummy_mask_idx", ...
whisper/modeling_whisper.py:WhisperPositionalEmbedding
[ -0.0001500328362453729, 0.01022341474890709, 0.008698374964296818, -0.0034878223668783903, -0.0009884516475722194, 0.02135055512189865, 0.0476716123521328, -0.014403153210878372, 0.010675277560949326, -0.016493022441864014, 0.014290186576545238, 0.022819112986326218, 0.0019769032951444387, ...
[ "Embedding", "ModelPositionalEmbedding", "None", "__init__", "class", "def", "else", "embedding_dim", "forward", "if", "input_ids", "is", "nn", "num_positions", "padding_idx", "past_key_values_length", "position_ids", "return", "self", "shape", "super", "weight" ]
whisper/modeling_whisper.py:eager_attention_forward
[ 0.00002179629882448353, 0.02998466230928898, 0.02536296658217907, -0.007439803332090378, 0.00018669961718842387, 0.03336638957262039, 0.05906752869486809, -0.02344665303826332, 0.019501302391290665, 0.010934256948530674, 0.02446117252111435, 0.02975921332836151, 0.002818107372149825, -0.00...
[ "Model_attention_forward", "None", "and", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "ndim", "nn", "not", "p", "query", "return", "scaling", "shape", "size", ...
whisper/modeling_whisper.py:WhisperAttention
[ -0.00017403865058440715, 0.044101569801568985, 0.04206611216068268, -0.005258264020085335, -0.0007951004081405699, 0.023860080167651176, 0.03392428532242775, -0.023520836606621742, 0.002798753557726741, 0.01396549679338932, 0.011986580677330494, 0.013117389753460884, -0.0013287011533975601, ...
[ "ALL_ATTENTION_FUNCTIONS", "EncoderDecoderCache", "False", "Instantiating", "Linear", "ModelAttention", "Module", "None", "Please", "True", "ValueError", "__class__", "__init__", "__name__", "_attn_implementation", "a", "and", "attention_interface", "attention_mask", "attn_outp...
whisper/modeling_whisper.py:WhisperEncoderLayer
[ -0.00013405329082161188, 0.04131663590669632, 0.03793002665042877, 0.008410080336034298, -0.0005714903818443418, 0.03183412924408913, 0.01986810937523842, -0.010216272436082363, 0.002695176750421524, 0.015465517528355122, 0.009200289845466614, 0.01862635277211666, 0.0018061917508020997, 0....
[ "ACT2FN", "False", "GradientCheckpointingLayer", "LayerNorm", "Linear", "ModelAttention", "ModelEncoderLayer", "__init__", "activation_dropout", "activation_fn", "activation_function", "attention_dropout", "attention_mask", "attn_weights", "clamp", "clamp_value", "class", "config",...
whisper/modeling_whisper.py:WhisperDecoderLayer
[ -0.00014831866428721696, 0.05198216065764427, 0.022600939497351646, -0.008870868943631649, -0.0007839701138436794, 0.029381221160292625, 0.0402296744287014, -0.03299737349152565, 0.005056960508227348, -0.0032347594387829304, -0.0032206338364630938, 0.020566854625940323, -0.000354905379936099...
[ "ACT2FN", "False", "GradientCheckpointingLayer", "LayerNorm", "Linear", "ModelAttention", "ModelDecoderLayer", "None", "True", "__init__", "activation_dropout", "activation_fn", "activation_function", "attention_dropout", "attention_mask", "cache_position", "class", "config", "cr...
whisper/modeling_whisper.py:WhisperPreTrainedModel
[ -0.0002818558714352548, 0.0463528037071228, 0.004252812825143337, 0.010389421135187149, -0.001255864161066711, 0.015298708342015743, 0.03425084054470062, -0.04680948331952095, -0.0007099913782440126, -0.012387387454509735, 0.00964732002466917, 0.001398575957864523, -0.0031253891065716743, ...
[ "ModelConfig", "ModelDecoderLayer", "ModelEncoder", "ModelEncoderLayer", "ModelForAudioClassification", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_get_feat_extract_output_lengths", "_init_weights", "_no_split_modules", "_supports_flash_attn", "_supports_...
whisper/modeling_whisper.py:WhisperEncoder
[ -0.00012686708942055702, 0.038793135434389114, 0.009867439977824688, 0.008006722666323185, -0.000493372033815831, 0.033154599368572235, 0.006963593885302544, -0.03766543045639992, 0.004877334926277399, -0.00952912773936987, 0.0237946268171072, 0.0034395079128444195, -0.00005682588380295783, ...
[ "BaseModelOutput", "Conv1d", "Embedding", "False", "LayerNorm", "Make", "Model", "ModelEncoder", "ModelEncoderLayer", "ModelPreTrainedModel", "ModuleList", "None", "True", "ValueError", "_", "__init__", "_freeze_parameters", "_requires_grad", "all_attentions", "all_positions", ...
whisper/modeling_whisper.py:WhisperDecoder
[ -0.00018432148499414325, 0.06851087510585785, -0.0012264468241482973, -0.017127718776464462, -0.0011839111102744937, 0.022458864375948906, 0.043556585907936096, -0.029264580458402634, 0.0027790009044110775, -0.00947128888219595, 0.003034215187653899, 0.011456289328634739, -0.0037431439850479...
[ "BaseModelOutputWithPastAndCrossAttentions", "DynamicCache", "Embedding", "EncoderDecoderCache", "False", "LayerNorm", "ModelDecoder", "ModelDecoderLayer", "ModelPositionalEmbedding", "ModelPreTrainedModel", "ModuleList", "None", "Setting", "True", "__init__", "all_cross_attentions", ...
whisper/modeling_whisper.py:WhisperModel
[ 0, 0.04693732038140297, 0.003031368600204587, -0.004386404529213905, 0, 0.026150792837142944, 0.013019518926739693, -0.03799687698483467, 0.009443341754376888, -0.023245148360729218, 0.023356905207037926, -0.001089616329409182, -0.0015156843001022935, -0.006258309353142977, -0.0238039270...
[ "BaseModelOutput", "ModelDecoder", "ModelEncoder", "ModelModel", "ModelPreTrainedModel", "None", "Seq2SeqModelOutput", "True", "__init__", "_compute_mask_indices", "_freeze_parameters", "_mask_input_features", "and", "apply_spec_augment", "attention_mask", "attentions", "auto_docstri...
whisper/modeling_whisper.py:WhisperForConditionalGeneration
[ -0.00021921122970525175, 0.04327176883816719, 0.006254122592508793, 0.010029133409261703, -0.0008909307653084397, 0.0349329374730587, 0.016226911917328835, -0.013635113835334778, -0.006479496601969004, -0.0019720206037163734, 0.025354551151394844, -0.017579155042767525, -0.000129413849208503...
[ "CrossEntropyLoss", "Labels", "Linear", "ModelForConditionalGeneration", "ModelGenerationMixin", "ModelModel", "ModelPreTrainedModel", "None", "Seq2SeqLMOutput", "ValueError", "__init__", "_freeze_parameters", "_tied_weights_keys", "allowed", "and", "attention_mask", "auto_docstring"...
whisper/modeling_whisper.py:WhisperDecoderWrapper
[ -0.00006856409163447097, 0.06439609825611115, 0.0004891196731477976, -0.008720304816961288, -0.0003930425737053156, 0.018893994390964508, 0.022471554577350616, -0.03152725473046303, -0.0011529248440638185, -0.006204832345247269, -0.0015581954503431916, 0.005897385533899069, -0.00265522091649...
[ "False", "ModelDecoder", "ModelDecoderWrapper", "ModelPreTrainedModel", "__init__", "args", "class", "config", "decoder", "def", "embed_tokens", "forward", "get_input_embeddings", "is_encoder_decoder", "kwargs", "post_init", "return", "self", "set_input_embeddings", "super", ...
whisper/modeling_whisper.py:WhisperForCausalLM
[ -0.0001632793719181791, 0.057530518621206284, 0.015618637204170227, 0.00035991688491776586, -0.0007900614873506129, 0.020899759605526924, 0.02797871083021164, -0.011348794214427471, -0.0017486694268882275, 0.010449879802763462, 0.015843365341424942, 0, -0.0021349217277020216, -0.0027950617...
[ "BaseModelOutput", "CausalLMOutputWithCrossAttentions", "CrossEntropyLoss", "False", "GenerationMixin", "Linear", "ModelDecoderWrapper", "ModelForCausalLM", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "cache_positi...
whisper/modeling_whisper.py:WhisperForAudioClassification
[ -0.00019318574049975723, 0.051478732377290726, 0.00668773939833045, 0.027987346053123474, -0.0005374076426960528, 0.005563749466091394, 0.018096236512064934, -0.02011941745877266, -0.0032033708412200212, -0.03551807627081871, 0.03529328107833862, 0.004074463155120611, -0.0019669821485877037,...
[ "CrossEntropyLoss", "Linear", "ModelEncoder", "ModelForAudioClassification", "ModelPreTrainedModel", "None", "Parameter", "SequenceClassifierOutput", "True", "_HIDDEN_STATES_START_POSITION", "__init__", "_freeze_parameters", "attentions", "auto_docstring", "class", "classifier", "cla...
ibert/modeling_ibert.py:IBertEmbeddings
[ -0.00019140304357279092, 0.017048101872205734, 0.0067458548583090305, -0.02133835293352604, -0.0008714572177268565, 0.03274138644337654, 0.017612608149647713, -0.0033305895049124956, -0.004600729327648878, 0.0023568153847008944, 0.027999531477689743, 0.017838411033153534, 0.00172174535691738...
[ "Dropout", "False", "IntLayerNorm", "LayerNorm", "Model", "Module", "None", "QuantAct", "QuantEmbedding", "__init__", "act_bit", "arange", "class", "config", "create_position_ids_from_input_ids", "create_position_ids_from_inputs_embeds", "def", "device", "dropout", "dtype", "...
ibert/modeling_ibert.py:IBertSelfAttention
[ -0.00010076956095872447, 0.03297530487179756, 0.02871319092810154, -0.016711974516510963, -0.00021468297927640378, 0.020413285121321678, 0.02871319092810154, -0.01149649266153574, -0.003028344362974167, 0.020861927419900894, 0.015366043895483017, 0.00897287204861641, 0.001451081712730229, ...
[ "Dropout", "False", "IntSoftmax", "Model", "Module", "None", "QuantAct", "QuantLinear", "The", "True", "ValueError", "_", "__init__", "a", "act_bit", "all_head_size", "and", "attention", "attention_head_size", "attention_mask", "attention_probs", "attention_probs_dropout_pr...
ibert/modeling_ibert.py:IBertSelfOutput
[ -0.0001452157157473266, 0.01757374219596386, 0.028613656759262085, 0.0013025410007685423, -0.000413644767832011, 0.04190661758184433, 0.026135308668017387, -0.030190788209438324, 0.0012180518824607134, 0.03740052878856659, 0.02444552630186081, -0.017799045890569687, 0.003802011488005519, -...
[ "Dropout", "IntLayerNorm", "LayerNorm", "Model", "Module", "QuantAct", "QuantLinear", "True", "__init__", "act_bit", "bias_bit", "class", "config", "def", "dense", "dropout", "eps", "force_dequant", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "hidden...
ibert/modeling_ibert.py:IBertAttention
[ 0.00006814648804720491, 0.037592239677906036, 0.02588687092065811, 0.001603860524483025, 0.00039041342097334564, 0.030839143320918083, 0.034440793097019196, -0.02307308092713356, 0.008722750470042229, 0.02025929093360901, 0.017783155664801598, 0.0128308841958642, 0.004192547872662544, -0.0...
[ "False", "Model", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "__init__", "attention_mask", "attention_output", "attention_output_scaling_factor", "class", "config", "def", "forward", "hidden_states", "hidden_states_scaling_factor", "nn", "output", "output_attentio...
ibert/modeling_ibert.py:IBertIntermediate
[ -0.00015019590500742197, 0.024200977757573128, 0.029176879674196243, -0.015719326213002205, -0.00019702168356161565, 0.023183178156614304, 0.025105686858296394, -0.022617734968662262, -0.0037036542780697346, 0.008142384700477123, 0.04071192443370819, -0.01865963265299797, 0.00025975055177696...
[ "BERT", "I", "IntGELU", "Model", "Module", "QuantAct", "QuantLinear", "True", "ValueError", "__init__", "act_bit", "bias_bit", "class", "config", "def", "dense", "for", "force_dequant", "forward", "gelu", "hidden_act", "hidden_size", "hidden_states", "hidden_states_scal...
ibert/modeling_ibert.py:IBertOutput
[ -0.0002049112954409793, 0.017523448914289474, 0.026454754173755646, 0.00339163513854146, -0.0005158112035132945, 0.040925730019807816, 0.02803751640021801, -0.031655263155698776, 0.0009680291987024248, 0.03233359009027481, 0.024532828480005264, -0.016732066869735718, 0.003759062383323908, ...
[ "Dropout", "IntLayerNorm", "LayerNorm", "Model", "Module", "QuantAct", "QuantLinear", "True", "__init__", "act_bit", "bias_bit", "class", "config", "def", "dense", "dropout", "eps", "force_dequant", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "hidden...
ibert/modeling_ibert.py:IBertLayer
[ -0.00009294810297433287, 0.03030458837747574, 0.02682517282664776, -0.006930771749466658, -0.00009908618085319176, 0.03254937380552292, 0.030753545463085175, -0.01975410245358944, -0.0013959752395749092, 0.014366619288921356, 0.024917105212807655, 0.006201216485351324, 0.004461508709937334, ...
[ "False", "Model", "ModelAttention", "ModelIntermediate", "ModelOutput", "Module", "None", "QuantAct", "__init__", "act_bit", "attention", "attention_mask", "attention_output", "attention_output_scaling_factor", "class", "config", "def", "feed_forward_chunk", "forward", "hidden_...
ibert/modeling_ibert.py:IBertEncoder
[ -0.000035550459870137274, 0.015055839903652668, 0.016179408878087997, 0.016404123976826668, 0, 0.03617895767092705, 0.02831396646797657, -0.030111679807305336, 0.005702118389308453, 0.0173029787838459, 0.024606185033917427, -0.013595198281109333, 0.0013693510554730892, 0.001355306478217244...
[ "BaseModelOutputWithPastAndCrossAttentions", "False", "Model", "ModelLayer", "Module", "ModuleList", "None", "True", "_", "__init__", "all_cross_attentions", "all_hidden_states", "all_self_attentions", "attention_mask", "attentions", "class", "config", "cross_attentions", "def", ...
ibert/modeling_ibert.py:IBertPooler
[ -0.0002590577641967684, 0.023847509175539017, 0.030206844210624695, 0.00222860649228096, -0.0008658916922286153, 0.02362038940191269, 0.04406111314892769, -0.01987292431294918, -0.005422469228506088, 0.015444100834429264, 0.023733949288725853, -0.018055971711874008, -0.000301642605336383, ...
[ "Linear", "Model", "Module", "Tanh", "__init__", "activation", "class", "config", "def", "dense", "first_token_tensor", "forward", "hidden_size", "hidden_states", "nn", "pooled_output", "quant_mode", "return", "self", "super" ]
ibert/modeling_ibert.py:IBertPreTrainedModel
[ -0.0002685419167391956, 0.03915376961231232, 0.0139428386464715, -0.029365327209234238, -0.0009532349067740142, 0.035056281834840775, 0.016276130452752113, -0.011666457168757915, 0.00022408134827855974, -0.0004766174533870071, 0.014511934481561184, -0.004865766502916813, -0.00132314697839319...
[ "BERT", "Embedding", "False", "I", "IntLayerNorm", "LayerNorm", "Linear", "Model", "ModelConfig", "ModelEmbeddings", "ModelLMHead", "None", "NotImplementedError", "PreTrainedModel", "QuantAct", "QuantEmbedding", "QuantLinear", "_init_weights", "_is_hf_initialized", "act_scaling...
ibert/modeling_ibert.py:IBertModel
[ -0.00006695825140923262, 0.01814962364733219, 0.007282256614416838, 0.015684859827160835, -0.0002870889729820192, 0.02655222825706005, 0.016357067972421646, -0.010251176543533802, 0.005181605461984873, 0.002996928757056594, 0.023079151287674904, 0.009298881515860558, 0.00006301952817011625, ...
[ "BaseModelOutputWithPoolingAndCrossAttentions", "Model", "ModelEmbeddings", "ModelEncoder", "ModelPooler", "ModelPreTrainedModel", "None", "True", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "batch_size", "class", "config", "cross_atten...
ibert/modeling_ibert.py:IBertForMaskedLM
[ 0.000010320354704163037, 0.028033124282956123, 0.011457983404397964, -0.015685200691223145, 0.00008821187657304108, 0.039157379418611526, 0.013627213425934315, 0.0020301768090575933, 0.006007098127156496, 0.009288753382861614, 0.019801175221800804, 0.03715501353144646, -0.0004380175669211894...
[ "CrossEntropyLoss", "False", "MaskedLMOutput", "Model", "ModelLMHead", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "bias", "class", "config", "decoder", "def", "else", "...
ibert/modeling_ibert.py:IBertLMHead
[ -0.00009366370795760304, 0.05718674883246422, 0.009512363001704216, 0.04007575288414955, -0.0003535475116223097, 0.01508469320833683, 0.05155813321471214, -0.02251446805894375, 0.0054316152818500996, 0.025216205045580864, 0.018574437126517296, 0.005741189233958721, 0.0002040373656200245, 0...
[ "LayerNorm", "Linear", "Model", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "dense", "eps", "features", "forward", "gelu", "hidden_size", "kwargs", "layer_norm", "layer_norm_eps", "nn", "return", "self", "super", "torch", "vocab_s...
ibert/modeling_ibert.py:IBertForSequenceClassification
[ -0.0002467058366164565, 0.024698778986930847, 0.0070487381890416145, 0.009247944690287113, -0.0008282267372123897, 0.03496174141764641, 0.018608668819069862, 0.022555962204933167, -0.0028053978458046913, 0.009981013834476471, 0.046916402876377106, 0.00936072412878275, 0.0009938720613718033, ...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "False", "MSELoss", "Model", "ModelClassificationHead", "ModelModel", "ModelPreTrainedModel", "None", "SequenceClassifierOutput", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "class", "classifier"...
ibert/modeling_ibert.py:IBertForMultipleChoice
[ -0.00019355631957296282, 0.053623054176568985, 0.02025005593895912, 0.028960974887013435, -0.0008732129936106503, 0.03642747551202774, 0.048419129103422165, 0.008937175385653973, 0.0013646162115037441, 0.011708831414580345, 0.04117888584733009, 0, -0.00031994242453947663, -0.01617741957306...
[ "CrossEntropyLoss", "Dropout", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "MultipleChoiceModelOutput", "None", "__init__", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "config", "def", "dropout", "else", "flat_attention_mask", "flat_inp...
ibert/modeling_ibert.py:IBertForTokenClassification
[ -0.0001549546286696568, 0.03561139106750488, 0.010480566881597042, -0.019383415579795837, -0.0005740364431403577, 0.034935224801301956, 0.042823825031518936, 0.025694293901324272, -0.0008346419781446457, 0.011889245361089706, 0.04778237268328667, 0.014988338574767113, -0.0003504087508190423,...
[ "CrossEntropyLoss", "Dropout", "False", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "TokenClassifierOutput", "__init__", "add_pooling_layer", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "config", "def", "dropout", "else", "for...
ibert/modeling_ibert.py:IBertClassificationHead
[ -0.00029584462754428387, 0.03490253910422325, 0.035130660980939865, 0.005503423046320677, -0.0009338710224255919, 0.020188722759485245, 0.05269598960876465, 0.0035643933806568384, -0.004163211211562157, 0.004476877860724926, 0.020873086526989937, -0.007128786761313677, 0.00013901133206672966...
[ "Dropout", "Linear", "Model", "Module", "__init__", "class", "config", "def", "dense", "dropout", "features", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "kwargs", "nn", "num_labels", "out_proj", "return", "self", "super", "tanh", "torch" ]
ibert/modeling_ibert.py:IBertForQuestionAnswering
[ -0.00013837010192219168, 0.02264365553855896, 0.005212524440139532, 0.015917817130684853, -0.0004974317853339016, 0.04528731107711792, 0.023204142227768898, 0.028584811836481094, 0.003194773104041815, 0.029817882925271988, 0.02454930916428566, 0.02791222743690014, 0.0009318088414147496, -0...
[ "CrossEntropyLoss", "False", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "QuestionAnsweringModelOutput", "__init__", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "clamp", "class", "config", "contiguous", "def", "dim", "els...
ibert/modeling_ibert.py:create_position_ids_from_input_ids
[ -0.00002266296542074997, -0.005494897719472647, 0.006387469824403524, -0.023318449035286903, -0.0003870137152262032, 0.02343001961708069, 0.023318449035286903, -0.03324831277132034, 0.015843156725168228, -0.007531078066676855, 0.023318449035286903, 0.018855588510632515, 0.0014295101864263415...
[ "Model_position_ids_from_input_ids", "cumsum", "def", "dim", "incremental_indices", "input_ids", "int", "long", "mask", "ne", "padding_idx", "past_key_values_length", "return", "torch", "type_as" ]
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:BaseModelOutputWithHighResVisionEncodings
[ -0.00027530002989806235, 0.014740247279405594, 0.038923464715480804, 0.027062172070145607, -0.0013818981824442744, 0.03132302314043045, 0.052051495760679245, -0.035929352045059204, 0.011630975641310215, 0.013531086035072803, 0.013243190944194794, 0.02418321743607521, -0.00015294445620384067,...
[ "ModelModelOutputWithHighResVisionEncodings", "ModelModelOutputWithPooling", "None", "class", "high_res_vision_attentions", "high_res_vision_hidden_states", "high_res_vision_last_hidden_state", "r" ]
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLHybridBaseModelOutputWithPast
[ -0.00023066798166837543, 0.013790011405944824, 0.01911146752536297, 0.021514706313610077, -0.001316059147939086, 0.027007821947336197, 0.040282852947711945, -0.04440269246697426, 0.015907149761915207, -0.006065316032618284, 0.009155194275081158, 0.026893382892012596, -0.0026607282925397158, ...
[ "ModelBaseModelOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "image_hidden_states", "last_hidden_state", "past_key_values", "r" ]
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLHybridCausalLMOutputWithPast
[ -0.00020158913685008883, 0.02637176588177681, 0.02978190779685974, 0.009434727020561695, -0.0011793408775702119, 0.03387407958507538, 0.03773890808224678, -0.030463937669992447, 0.01784641109406948, -0.004205842036753893, 0.020006166771054268, 0.018642110750079155, -0.0008596400148235261, ...
[ "ModelCausalLMOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "image_hidden_states", "logits", "loss", "past_key_values", "r" ]
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLHybridLayerNorm
[ -0.0001214670337503776, 0.036052823066711426, 0.017125090584158897, 0.0576845183968544, -0.000281662680208683, 0.018477071076631546, -0.0048164320178329945, -0.03875678405165672, 0.010590516962110996, 0.047995321452617645, 0.009407533332705498, 0.00543608982115984, 0.003577115945518017, 0....
[ "LayerNorm", "ModelLayerNorm", "NotImplementedError", "Unsupported", "__init__", "channels_first", "channels_last", "class", "data", "data_format", "def", "else", "eps", "f", "features", "format", "forward", "if", "in", "kwargs", "nn", "normalized_shape", "not", "permut...
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLSamVisionNeck
[ -0.00005773605153081007, 0.04197190701961517, 0.015908706933259964, 0.023242507129907608, 0.0001983299443963915, 0.030463481321930885, 0.009703182615339756, -0.03768445551395416, 0.01861657202243805, 0.04197190701961517, 0.01918070949614048, 0.0182780884206295, 0.005133660510182381, -0.023...
[ "Conv2d", "ModelLayerNorm", "ModelVLSamVisionNeck", "Module", "__init__", "channels_first", "class", "config", "conv1", "conv2", "data_format", "def", "forward", "hidden_size", "hidden_states", "kernel_size", "layer_norm1", "layer_norm2", "nn", "output_channels", "padding", ...
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLSamVisionProj
[ -0.0002446594007778913, 0.03721659630537033, 0.010892662219703197, 0.025870073586702347, -0.000616967212408781, 0.03403956815600395, 0.029274029657244682, -0.034947291016578674, 0.0026806159876286983, 0.024621956050395966, 0.02484888583421707, -0.014466816559433937, 0.0020565572194755077, ...
[ "Conv2d", "False", "ModelVLSamVisionProj", "Module", "__init__", "align_corners", "bilinear", "class", "config", "conv1", "conv2", "def", "features", "forward", "functional", "interpolate", "kernel_size", "mode", "nn", "output_channels", "output_size", "padding", "return"...
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLHybridAligner
[ -0.0003298733208794147, 0.010498576797544956, 0.03671633452177048, 0.03648685663938522, -0.0011330432025715709, 0.040846921503543854, 0.04130587726831436, -0.053468164056539536, 0.0017712763510644436, 0.033274177461862564, 0.020423460751771927, 0.028110943734645844, -0.0008856381755322218, ...
[ "GELU", "Linear", "ModelAligner", "Module", "__init__", "act", "class", "concat", "config", "def", "dim", "encodings", "forward", "hidden_size", "high_res_in_channels", "high_res_vision_config", "high_res_vision_encodings", "high_res_vision_proj", "in_channels", "nn", "out_ch...
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLHybridPreTrainedModel
[ -0.00012019151472486556, 0.04649043083190918, 0.007636109367012978, 0.012240233831107616, -0.000617626472376287, 0.02582801692187786, 0.005109455436468124, -0.02088700421154499, 0, 0.017405837774276733, 0.01358778215944767, -0.006653521675616503, -0.0022038035094738007, 0.01094883307814598...
[ "Conv2d", "Linear", "LlamaDecoderLayer", "ModelConfig", "ModelLayerNorm", "ModelModel", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "_can_compile_fullgraph", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_supports_flash_attn", "_supports_sdpa", ...
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLHybridModel
[ -0.00024457028484903276, 0.03221238777041435, 0.013781003654003143, 0.018261246383190155, -0.0008967576432041824, 0.03153184428811073, 0.030624451115727425, -0.03198553994297981, 0.003969836514443159, 0.03243923559784889, 0.017580702900886536, 0.014461546204984188, 0.0020416302140802145, 0...
[ "AutoModel", "BaseModelOutputWithHighResVisionEncodings", "False", "Image", "ModelAligner", "ModelBaseModelOutputWithPast", "ModelModel", "ModelPreTrainedModel", "ModelVLSamVisionNeck", "ModelVLSamVisionProj", "Model_VL_COMMON_CUSTOM_ARGS", "None", "Parameter", "True", "ValueError", "Y...
deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py:DeepseekVLHybridForConditionalGeneration
[ -0.000255380233284086, 0.037001755088567734, 0.005306233651936054, 0.01747935824096203, -0.0010428025852888823, 0.029056593775749207, 0.028262076899409294, -0.03949880599975586, -0.0032631917856633663, 0.023154472932219505, 0.03541272506117821, -0.003220628248527646, -0.0004575562197715044, ...
[ "False", "GenerationMixin", "Linear", "ModelCausalLMOutputWithPast", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "Model_VL_COMMON_CUSTOM_ARGS", "None", "True", "__init__", "_can_compile_fullgraph", "_tied_weights_keys", "attention_mask", "attentions", "auto_d...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerModelOutput
[ -0.0000839982894831337, 0.033273935317993164, 0.001110545825213194, 0.014486610889434814, -0.0005375890759751201, 0.03915911912918091, 0.07696011662483215, -0.02048497274518013, 0.016184261068701744, -0.01935320720076561, 0.0187873225659132, 0.027728278189897537, -0.0043007126078009605, 0....
[ "ModelModelOutput", "ModelOutput", "None", "class", "decoder_attentions", "decoder_hidden_states", "duration_outputs", "encoder_attentions", "encoder_hidden_states", "encoder_last_hidden_state", "energy_outputs", "loss", "pitch_outputs", "r", "spectrogram" ]
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerWithHifiGanOutput
[ -0.00030680125928483903, 0.023258456960320473, -0.0010518900817260146, 0.02559599094092846, -0.0015851398929953575, 0.018817143514752388, 0.0846187099814415, -0.03155670315027237, 0.005084135103970766, -0.024193471297621727, 0.030387934297323227, 0.025128483772277832, -0.005113354418426752, ...
[ "ModelModelOutput", "ModelWithHifiGanOutput", "None", "class", "r", "waveform" ]
fastspeech2_conformer/modeling_fastspeech2_conformer.py:length_regulator
[ -0.000399101700168103, 0.02801436372101307, 0.0045317355543375015, 0.0014345591189339757, -0.0018686051480472088, 0.03978510573506355, 0.043551743030548096, -0.0353122241795063, -0.002589563140645623, -0.006738749332726002, 0.043551743030548096, -0.02189357951283455, -0.0013757053529843688, ...
[ "Model_regulator", "ValueError", "be", "def", "device", "dim", "dtype", "duration_labels", "elif", "encoded_embedding", "encoded_embeddings", "enumerate", "eq", "float", "for", "greater", "hidden_states", "i", "if", "in", "long", "max", "max_len", "must", "raise", "...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerDurationPredictor
[ -0.0002947714820038527, 0.02691766805946827, 0.010122884064912796, 0.0008807196863926947, -0.0013084977399557829, 0.03197911009192467, 0.058666713535785675, -0.03934120759367943, 0.0009490203810855746, 0.009662752971053123, 0.042332060635089874, -0.007707195822149515, -0.0018692825688049197,...
[ "Linear", "ModelDurationPredictor", "ModelPredictorLayer", "Module", "ModuleList", "__init__", "append", "clamp", "class", "config", "conv_layers", "def", "duration_predictor_channels", "duration_predictor_dropout_rate", "duration_predictor_kernel_size", "duration_predictor_layers", ...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerBatchNormConvLayer
[ -0.00015789967437740415, 0.012194441631436348, 0.02088862657546997, -0.02088862657546997, -0.00028933570138178766, 0.024840528145432472, 0.014396215789020061, -0.025743821635842323, 0.009258742444217205, -0.023711415007710457, 0.05035852640867233, -0.008242539130151272, 0.0017924699932336807...
[ "BatchNorm1d", "Conv1d", "Dropout", "ModelBatchNormConvLayer", "Module", "None", "Tanh", "__init__", "activation", "batch_norm", "class", "config", "conv", "def", "dropout", "else", "forward", "hidden_states", "if", "in_conv_dim", "is", "kernel_size", "layer_id", "nn", ...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerSpeechDecoderPostnet
[ -0.00022602542594540864, 0.025855885818600655, 0.014921237714588642, 0.016743678599596024, -0.0008720665937289596, 0.02255271002650261, 0.03667663037776947, -0.03918248787522316, 0.007289764937013388, -0.019249536097049713, 0.033942967653274536, 0.013497455045580864, 0.000772401865106076, ...
[ "Linear", "ModelBatchNormConvLayer", "ModelSpeechDecoderPostnet", "Module", "ModuleList", "__init__", "class", "config", "def", "feat_out", "for", "forward", "hidden_size", "hidden_states", "i", "in", "layer", "layer_output", "layers", "nn", "num_mel_bins", "outputs_after_p...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerPredictorLayer
[ -0.00019965777755714953, 0.039352018386125565, 0.011986534111201763, 0.005116892978549004, -0.000614875287283212, 0.038673534989356995, 0.031436383724212646, -0.007972176186740398, 0.00842449814081192, 0.0032934697810560465, 0.044779881834983826, -0.010064165107905865, 0.00042758567724376917...
[ "Conv1d", "Dropout", "LayerNorm", "ModelPredictorLayer", "Module", "ReLU", "__init__", "activation", "class", "conv", "def", "dropout", "dropout_rate", "forward", "hidden_states", "input_channels", "kernel_size", "layer_norm", "nn", "num_chans", "padding", "return", "self...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerVariancePredictor
[ -0.00025774919777177274, 0.03663238137960434, 0.012400402687489986, 0.002815687796100974, -0.0009030109504237771, 0.026507282629609108, 0.030489062890410423, -0.009214977733790874, 0.0014149541966617107, 0.003000556258484721, 0.03799756243824959, 0.015585827641189098, 0.00046572613064199686,...
[ "Linear", "ModelPredictorLayer", "ModelVariancePredictor", "Module", "ModuleList", "None", "__init__", "append", "class", "config", "conv_layers", "def", "dropout_rate", "else", "encoder_hidden_states", "for", "forward", "hidden_size", "hidden_states", "idx", "if", "in", ...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerVarianceEmbedding
[ -0.00021539026056416333, 0.027343038469552994, 0.028250692412257195, -0.005275731440633535, -0.0005814650212414563, 0.03290241211652756, 0.022804776206612587, -0.005956471432000399, 0.004566628020256758, 0.018493425101041794, 0.037667591124773026, 0.003375333733856678, -0.0003758249804377556...
[ "Conv1d", "Dropout", "ModelVarianceEmbedding", "Module", "__init__", "class", "conv", "def", "dropout", "dropout_rate", "forward", "hidden_states", "in_channels", "kernel_size", "nn", "out_channels", "padding", "return", "self", "super", "transpose" ]
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerAttention
[ -0.0002286892122356221, 0.04334103688597679, 0.046971701085567474, -0.018834063783288002, -0.0011274910066276789, 0.024280058220028877, 0.018493689596652985, -0.016451440751552582, 0.0016876909648999572, 0.019741728901863098, 0.01565723307430744, 0.02530118264257908, 0.0025244452990591526, ...
[ "Dropout", "False", "Linear", "ModelAttention", "Module", "None", "Parameter", "Tensor", "_", "__init__", "attention_dropout_rate", "attention_mask", "attn_output", "attn_weights", "bsz", "bsz_pos", "but", "cat", "class", "config", "contiguous", "def", "device", "dim", ...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerConvolutionModule
[ -0.000059254416555631906, 0.021848076954483986, 0.023659316822886467, -0.015961548313498497, 0.00010126314737135544, 0.02716859243810177, -0.004782804287970066, -0.04958268254995346, 0.010924038477241993, 0.0044431970454752445, 0.04007367417216301, -0.002108396030962467, 0.002957414602860808...
[ "ACT2FN", "BatchNorm1d", "Conv1d", "ModelConvolutionModule", "Module", "None", "__init__", "activation", "all", "all_masked_rows", "attention_mask", "bool", "channels", "class", "config", "conv_kernel_size", "def", "depthwise_conv", "dim", "else", "forward", "functional", ...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerEncoderLayer
[ -0.00016332374070771039, 0.026731543242931366, 0.027531204745173454, 0.027759680524468422, -0.0007960916263982654, 0.05140681564807892, 0.02147662453353405, -0.02913052961230278, 0.007054157555103302, -0.017021367326378822, 0.03587053343653679, 0.02261899784207344, 0.00043196004116907716, ...
[ "Dropout", "False", "LayerNorm", "Linear", "ModelAttention", "ModelConvolutionModule", "ModelEncoderLayer", "ModelMultiLayeredConv1d", "Module", "None", "__init__", "attention_mask", "attention_output", "attention_scores", "cat", "class", "concat_after", "concat_linear", "config"...
fastspeech2_conformer/modeling_fastspeech2_conformer.py:FastSpeech2ConformerMultiLayeredConv1d
[ -0.00010407396621303633, 0.028456922620534897, 0.028003424406051636, -0.022901587188243866, 0, 0.025282444432377815, 0.007652757689356804, -0.04058796167373657, 0.013208093121647835, -0.0016439257888123393, 0.03355875983834267, -0.0242620762437582, 0.0020407354459166527, -0.028343547135591...
[ "Conv1d", "Dropout", "ModelMultiLayeredConv1d", "Module", "__init__", "class", "config", "conv1", "conv2", "def", "dropout", "dropout_rate", "forward", "hidden_channels", "hidden_size", "hidden_states", "input_channels", "kernel_size", "linear_units", "module_config", "nn", ...