Columns: identifier (string, length 24 to 102), embedding (list of floats, fixed length 2.56k), tokens (list of strings, length 4 to 448)
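Each row below follows that three-column schema: an identifier naming a source object ("<model_dir>/<modeling_file>.py:<object>"), a fixed-length embedding vector, and what appear to be the sorted, de-duplicated tokens from that object's source. A minimal Python sketch of one record, built from the first row of the dump; the class name CodeEmbeddingRecord is an illustrative assumption (the dump does not name its record type), and both lists are truncated here exactly as they are in the dump:

from dataclasses import dataclass

@dataclass
class CodeEmbeddingRecord:  # record name is an assumption, not given by the dump
    identifier: str          # "<model_dir>/<modeling_file>.py:<object>", 24 to 102 characters
    embedding: list[float]   # embedding vector, 2.56k floats per row
    tokens: list[str]        # source tokens, 4 to 448 entries per row

# Example built from the first row shown below (lists truncated, as in the dump itself).
example = CodeEmbeddingRecord(
    identifier="blenderbot/modeling_blenderbot.py:BlenderbotForCausalLM",
    embedding=[-0.00017654977273195982, 0.051267948001623154, 0.01765146479010582],
    tokens=["CausalLMOutputWithCrossAttentions", "CrossEntropyLoss", "False", "GenerationMixin"],
)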
blenderbot/modeling_blenderbot.py:BlenderbotForCausalLM
[ -0.00017654977273195982, 0.051267948001623154, 0.01765146479010582, -0.010287318378686905, -0.0008045751019380987, 0.01787632331252098, 0.027882568538188934, -0.02383509837090969, 0.002881011227145791, 0.010343533009290695, 0.019337909296154976, -0.0021923792082816362, -0.0000619241764070466...
[ "CausalLMOutputWithCrossAttentions", "CrossEntropyLoss", "False", "GenerationMixin", "Linear", "ModelDecoderWrapper", "ModelForCausalLM", "ModelPreTrainedModel", "None", "True", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "cache_position", "cla...
vaultgemma/modeling_vaultgemma.py:VaultGemmaRMSNorm
[ -0.00007788453513057902, 0.04100974649190903, 0.021411165595054626, 0.05097896605730057, -0.00047438760520890355, 0.053697843104600906, 0.0159734096378088, -0.010195793583989143, 0.009855933487415314, 0.04191603884100914, 0.013990893959999084, 0.012518168427050114, 0.0031578638590872288, 0...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "_norm", "class", "def", "dim", "eps", "extra_repr", "f", "float", "forward", "keepdim", "mean", "nn", "output", "pow", "return", "rsqrt", "self", "shape", "super", "torch", "tuple", "type_as", "weight",...
vaultgemma/modeling_vaultgemma.py:VaultGemmaMLP
[ -0.00025436788564547896, 0.024477045983076096, 0.022052433341741562, 0.026324370875954628, -0.0009453104576095939, 0.05957620590925217, 0.03509915992617607, -0.005801752675324678, 0, -0.00692746601998806, 0.028864441439509392, -0.04710676893591881, -0.0014071415644139051, 0.009756181389093...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_activation", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
vaultgemma/modeling_vaultgemma.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
vaultgemma/modeling_vaultgemma.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
vaultgemma/modeling_vaultgemma.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
vaultgemma/modeling_vaultgemma.py:eager_attention_forward
[ -0.00008721329504624009, 0.034730274230241776, 0.029317762702703476, -0.025145620107650757, -0.00028013967676088214, 0.022326603531837463, 0.05863552540540695, -0.02221384271979332, 0.014546120539307594, 0.015222684480249882, 0.020184151828289032, 0.018718263134360313, 0.0028472058475017548,...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "head_dim", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_v...
vaultgemma/modeling_vaultgemma.py:VaultGemmaAttention
[ -0.00012698510545305908, 0.03476570174098015, 0.028218911960721016, -0.0077319820411503315, -0.0006948907393962145, 0.030927928164601326, 0.04086098447442055, -0.003781334264203906, 0, 0.005446250084787607, 0.02054336853325367, 0.02957342006266117, -0.0010440998012199998, -0.01320645119994...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_logit_softcapping", "attn_output", "attn_weights", "cache_kwargs"...
vaultgemma/modeling_vaultgemma.py:VaultGemmaDecoderLayer
[ -0.00016038368630688637, 0.047374874353408813, 0.011900116689503193, 0.0013253685319796205, -0.0005851360619999468, 0.039027873426675797, 0.040832627564668655, -0.03226003423333168, 0.005047679878771305, -0.006147454027086496, -0.0014522654237225652, 0.02357463911175728, -0.00093762768665328...
[ "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "attention_type", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "input_layernorm...
vaultgemma/modeling_vaultgemma.py:VaultGemmaRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
vaultgemma/modeling_vaultgemma.py:VaultGemmaPreTrainedModel
[ -0.00024435759405605495, 0.03762398660182953, -0.001232412178069353, 0.013372380286455154, -0.0009420161950401962, 0.026518110185861588, 0.019265294075012207, -0.022778376936912537, -0.005099636502563953, 0.011332525871694088, 0.00010535707406233996, 0.006431208457797766, -0.0029322910122573...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "PreTrainedModel", "RMSNorm", "True", "__class__", "__name__", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend"...
vaultgemma/modeling_vaultgemma.py:VaultGemmaModel
[ -0.00011123572039650753, 0.04479677975177765, -0.000353492156136781, -0.004220801871269941, -0.0006612589932046831, 0.037818387150764465, 0.039619263261556625, -0.01744598150253296, 0.012043355032801628, 0.0009426457691006362, 0.022510943934321404, 0.008329049684107304, -0.001097408472560346...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "attention_type", "auto_do...
vaultgemma/modeling_vaultgemma.py:VaultGemmaForCausalLM
[ -0.00030409442842938006, 0.04762476682662964, 0.006840335670858622, -0.0021608592942357063, -0.0013952568406239152, 0.020835835486650467, 0.035489607602357864, 0.008872401900589466, 0.004092753399163485, 0.01808825135231018, 0.030681340023875237, -0.004035511985421181, 0.0020034457556903362,...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tuple", "class", "colwi...
qwen3_moe/modeling_qwen3_moe.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
qwen3_moe/modeling_qwen3_moe.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
qwen3_moe/modeling_qwen3_moe.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
qwen3_moe/modeling_qwen3_moe.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeAttention
[ -0.00005800188228022307, 0.03622129559516907, 0.026209820061922073, 0.0046120285987854, -0.000298797560390085, 0.03217171132564545, 0.03622129559516907, -0.008324149064719677, 0.00350120454095304, 0.016648298129439354, 0.021485302597284317, 0.03217171132564545, 0, -0.020022952929139137, ...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "ModelRMSNorm", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeMLP
[ -0.00024263472005259246, 0.02898676134645939, 0.02358049899339676, 0.02829659916460514, -0.0008483228739351034, 0.05774346739053726, 0.029906975105404854, -0.005061180330812931, -0.001998591236770153, -0.004831126891076565, 0.028066545724868774, -0.04946153610944748, -0.0024299416691064835, ...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "None", "__init__", "act_fn", "class", "config", "def", "down_proj", "else", "forward", "gate_proj", "hidden_act", "hidden_size", "if", "intermediate_size", "is", "nn", "return", "self", "super", "up_proj", "x" ]
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeExperts
[ -0.0003679879300761968, 0.03843185305595398, -0.010252400301396847, -0.0007396374130621552, -0.0015305369161069393, 0.0618659108877182, 0.0588194839656353, -0.01242005079984665, -0.00471610389649868, -0.020621970295906067, 0.014997797086834908, -0.022731035947799683, -0.0021383578423410654, ...
[ "ACT2FN", "ModelExperts", "Module", "None", "Parameter", "__init__", "act_fn", "chunk", "class", "config", "continue", "current_hidden_states", "current_state", "def", "dim", "down_proj", "dtype", "empty", "expert_hit", "expert_idx", "expert_mask", "final_hidden_states", ...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeTopKRouter
[ -0.00036532338708639145, 0.038302045315504074, 0.0029519598465412855, -0.003818455385044217, -0.0016155003104358912, 0.05780554190278053, 0.07660409063100815, -0.025495532900094986, -0.0035687871277332306, 0.01621374860405922, 0.022440768778324127, -0.02114836871623993, -0.001696275430731475...
[ "F", "ModelTopKRouter", "Module", "Parameter", "True", "__init__", "class", "config", "def", "dim", "dtype", "float", "forward", "functional", "hidden_dim", "hidden_size", "hidden_states", "if", "keepdim", "linear", "nn", "norm_topk_prob", "num_experts", "num_experts_pe...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeSparseMoeBlock
[ -0.000361688929842785, 0.018237890675663948, 0.00248432788066566, 0.00014522358833346516, -0.001402914640493691, 0.04793291538953781, 0.03413758799433708, -0.03974924609065056, -0.006108523812144995, -0.019406985491514206, 0.015198241919279099, -0.03413758799433708, 0.0006247354322113097, ...
[ "ModelExperts", "ModelSparseMoeBlock", "ModelTopKRouter", "Module", "_", "__init__", "batch_size", "class", "config", "def", "experts", "final_hidden_states", "forward", "gate", "hidden_dim", "hidden_states", "hidden_states_reshaped", "nn", "reshape", "return", "routing_weigh...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeRMSNorm
[ -0.00009884802420856431, 0.04179859161376953, 0.03230918198823929, 0.053095508366823196, -0.00046952810953371227, 0.03931327164173126, 0.022028988227248192, -0.02937198430299759, 0.007964326068758965, 0.042476408183574677, 0.019995542243123055, 0.006975846365094185, 0.0028524715453386307, ...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeDecoderLayer
[ -0.00028958593611605465, 0.04210079088807106, 0.011440431699156761, 0.0015373080968856812, -0.0010939913336187601, 0.04438887536525726, 0.04072793945670128, -0.04187197983264923, 0.002102179452776909, -0.016474222764372826, 0.005977625492960215, 0.01355691161006689, -0.0018304691184312105, ...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "ModelSparseMoeBlock", "None", "Tensor", "_", "__init__", "and", "attention_mask", "cache_position", "class", "config", "decoder_sparse_step", "def", "else", "eps", "forw...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoePreTrainedModel
[ -0.00040246229036711156, 0.03465566039085388, -0.009717634879052639, 0.003366048214957118, -0.0016098491614684463, 0.052451811730861664, 0.044022057205438614, -0.014986231923103333, -0.008722455240786076, -0.005707647185772657, 0.003541668178513646, -0.012937333434820175, -0.0036148431245237...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelExperts", "ModelPreTrainedModel", "ModelTopKRouter", "OutputRecorder", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_supports_a...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeModel
[ -0.00018928055942524225, 0.05141322314739227, -0.005914795678108931, -0.01006652694195509, -0.0010023872600868344, 0.04572591930627823, 0.040721092373132706, -0.013820147141814232, 0.01097649522125721, -0.0023744492791593075, 0.023090451955795288, -0.006625708658248186, -0.001834155293181538...
[ "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "MoeModelOutputWithPast", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "auto_docstring", "cache_po...
qwen3_moe/modeling_qwen3_moe.py:load_balancing_loss_func
[ -0.00027690583374351263, 0.01840798556804657, 0.001643570140004158, -0.028812499716877937, -0.0009432663209736347, 0.05808233842253685, 0.038874007761478424, -0.011547867208719254, 0, -0.02252405695617199, 0.031556546688079834, -0.0061455233953893185, -0.0008396499906666577, 0.014349081553...
[ "Model_balancing_loss_func", "None", "_", "attention_mask", "batch_size", "cat", "compute_device", "concatenated_gate_logits", "def", "device", "dim", "else", "expand", "expert_attention_mask", "expert_mask", "float", "for", "functional", "gate_logits", "if", "in", "is", ...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeForCausalLM
[ -0.00044288873323239386, 0.0379493422806263, -0.00161050446331501, -0.01030722912400961, -0.001698350184597075, 0.04966210201382637, 0.04029189422726631, -0.004597258288413286, -0.005153614562004805, 0.006207762751728296, 0.02857913449406624, -0.0060027893632650375, -0.00006588427640963346, ...
[ "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "MoeCausalLMOutputWithPast", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "aux_loss", "cache_position", "can_return_tuple", ...
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeForSequenceClassification
[ -0.0002121782599715516, 0.015340753830969334, -0.017272552475333214, 0.0278406273573637, -0.0007244244916364551, 0.027954263612627983, 0.015227118507027626, -0.011079433374106884, 0.0032101948745548725, -0.013863496482372284, 0.02909061498939991, 0.008409005589783192, -0.003437465289607644, ...
[ "GenericForSequenceClassification", "ModelForSequenceClassification", "ModelPreTrainedModel", "class", "pass" ]
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeForTokenClassification
[ -0.00016310509818140417, 0.024394849315285683, -0.0069496952928602695, -0.024848707020282745, -0.0007446102099493146, 0.035854753106832504, 0.0356278270483017, -0.007885776460170746, 0.0015317696379497647, -0.018381234258413315, 0.040166404098272324, 0.024394849315285683, -0.0042832815088331...
[ "GenericForTokenClassification", "ModelForTokenClassification", "ModelPreTrainedModel", "class", "pass" ]
qwen3_moe/modeling_qwen3_moe.py:Qwen3MoeForQuestionAnswering
[ -0.00014039420057088137, 0.034368500113487244, 0.020329080522060394, 0.027292631566524506, -0.0005580669385381043, 0.03549165278673172, 0.04155668243765831, 0.023586224764585495, -0.001811085152439773, 0.014600996859371662, 0.004239904694259167, 0.009715278632938862, -0.003355421358719468, ...
[ "GenericForQuestionAnswering", "ModelForQuestionAnswering", "ModelPreTrainedModel", "base_model_prefix", "class", "transformer" ]
yoso/modeling_yoso.py:load_cuda_kernels
[ -0.00016524974489584565, 0.015372741036117077, -0.0025284113362431526, -0.007512993644922972, -0.000606818706728518, 0.04646497592329979, 0.02727794647216797, -0.03791172057390213, 0.024272749200463295, 0.019996121525764465, 0.015025987289845943, -0.02508184127509594, 0.001632631290704012, ...
[ "ImportError", "Model", "Model_cuda_kernels", "community", "def", "get_kernel", "global", "if", "install", "installed", "is", "is_kernels_available", "it", "kernels", "lsh_cumulation", "not", "pip", "please", "raise", "with" ]
yoso/modeling_yoso.py:to_contiguous
[ -0.000021994561393512413, 0.004357354715466499, 0.03983867168426514, -0.002914334647357464, -0.00037667067954316735, 0.03440612554550171, -0.008601531386375427, -0.06835953891277313, 0.01765577495098114, 0.03123714029788971, -0.011034859344363213, -0.023088321089744568, 0.0005411325255408883...
[ "append", "contiguous", "def", "else", "for", "if", "in", "input_tensors", "isinstance", "list", "out", "return", "tensor", "to_contiguous" ]
yoso/modeling_yoso.py:normalize
[ 0.00004497935515246354, 0.009200483560562134, 0.0246098805218935, 0.014111170545220375, 0.00048330757999792695, 0.05621890351176262, 0.027432115748524666, -0.03228635713458061, 0.015578731894493103, 0.0036689043045043945, 0.03160902112722397, -0.02449699118733406, 0.0016439514001831412, 0....
[ "Model", "def", "dim", "else", "for", "if", "in", "input_tensors", "isinstance", "list", "out", "p", "return", "tensor" ]
yoso/modeling_yoso.py:hashing
[ 0.00006593556463485584, -0.03809293359518051, 0.016879504546523094, -0.008724878542125225, 0.0008411239250563085, 0.0348995141685009, 0.047673191875219345, -0.0021527069620788097, 0.02554735727608204, 0.025433305650949478, 0.017677858471870422, -0.0328466035425663, -0.0002940369595307857, ...
[ "Key", "Model", "Query", "ValueError", "arange", "def", "device", "dim", "has", "hash_len", "if", "incorrect", "int", "key", "key_binary", "key_projection", "len", "matmul", "num_hash", "query", "query_binary", "query_hash", "query_projection", "raise", "raise_pow", ...
yoso/modeling_yoso.py:YosoCumulation
[ -0.0002785766264423728, -0.005434940103441477, 0.030596699565649033, -0.013630485162138939, -0.0009920921875163913, 0.031286850571632385, 0.021969810128211975, 0.009892166592180729, 0.012595257721841335, 0.031056800857186317, 0.01455068588256836, -0.00023184764722827822, -0.00161035265773534...
[ "Function", "ModelCumulation", "None", "acos", "autograd", "backward", "class", "config", "ctx", "cumulation_value", "def", "expectation", "forward", "grad", "grad_key", "grad_query", "grad_value", "hash_code_len", "key", "key_mask", "math", "matmul", "pi", "query", "...
yoso/modeling_yoso.py:YosoLSHCumulation
[ -0.0003169644623994827, -0.024342870339751244, 0.023879196494817734, -0.008925719186663628, -0.0014055109350010753, 0.026081647723913193, 0.014663681387901306, -0.004144083708524704, 0.013214700855314732, 0.027124913409352303, 0.014779600314795971, 0.003013879293575883, -0.000688265659846365...
[ "Function", "Key", "ModelLSHCumulation", "None", "Query", "Value", "ValueError", "acos", "and", "autograd", "backward", "class", "config", "ctx", "cumulation_value", "def", "differ", "dimension", "else", "expectation", "fast_hash", "forward", "grad", "grad_key", "grad...
yoso/modeling_yoso.py:YosoEmbeddings
[ -0.00024067205958999693, 0.015973493456840515, 0.011409638449549675, -0.00889951828867197, -0.001269322237931192, 0.03742361441254616, 0.033772531896829605, -0.004506807308644056, 0.005961535964161158, -0.012322410009801388, 0.02224879525601864, 0.02281927689909935, -0.0014190737856552005, ...
[ "Dropout", "Embedding", "False", "LayerNorm", "ModelEmbeddings", "Module", "None", "__init__", "arange", "buffered_token_type_ids", "buffered_token_type_ids_expanded", "class", "config", "def", "device", "dropout", "dtype", "else", "embeddings", "eps", "expand", "forward", ...
yoso/modeling_yoso.py:YosoSelfAttention
[ -0.0001517046766821295, 0.0356752946972847, 0.03319157287478447, -0.008805927820503712, -0.0007549953879788518, 0.020434267818927765, 0.01975688897073269, -0.022353509441018105, 0.005334360059350729, 0.02596619725227356, 0.01061227172613144, 0.009821996092796326, 0.0019474647706374526, -0....
[ "Conv2d", "Could", "Dropout", "Exception", "False", "Linear", "ModelCumulation", "ModelLSHCumulation", "ModelSelfAttention", "Module", "None", "The", "ValueError", "_", "__init__", "a", "all_head_size", "and", "apply", "as", "attention", "attention_head_size", "attention_...
yoso/modeling_yoso.py:YosoSelfOutput
[ -0.00012168083776487038, 0.04875698313117027, 0.03859927877783775, 0.020879726856946945, -0.0006630723946727812, 0.05620596557855606, 0.02370131015777588, -0.01952536590397358, 0.0035551965702325106, 0.017155233770608902, 0.01365646906197071, 0.003512872848659754, 0.0036962758749723434, -0...
[ "Dropout", "LayerNorm", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
yoso/modeling_yoso.py:YosoAttention
[ 0.00018736459605861455, 0.03642650693655014, 0.04434531182050705, 0.0012938763247802854, 0.0007105714175850153, 0.03914152458310127, 0.037784017622470856, -0.02386954240500927, 0.016063863411545753, -0.0051755052991211414, 0.019910139963030815, 0.02466142363846302, 0.003846277017146349, -0...
[ "False", "ModelAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "__init__", "attention_mask", "attention_output", "class", "config", "def", "forward", "hidden_states", "nn", "output", "output_attentions", "outputs", "return", "self", "self_outputs", "s...
yoso/modeling_yoso.py:YosoIntermediate
[ -0.00025367451598867774, 0.02240910567343235, 0.04047359153628349, 0.012862369418144226, -0.0009432404185645282, 0.03635763004422188, 0.03452831506729126, -0.018864808604121208, -0.001329111517407, -0.003772961674258113, 0.023209432139992714, -0.02103712037205696, -0.0013719861162826419, 0...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
yoso/modeling_yoso.py:YosoOutput
[ -0.0002591986849438399, 0.03935529664158821, 0.0507957898080349, 0.030660521239042282, -0.0012513039400801063, 0.04988054931163788, 0.03798243775963783, -0.023681821301579475, 0.002259497530758381, 0.014758236706256866, 0.011040075682103634, 0.00267421524040401, 0.0013013561256229877, 0.00...
[ "Dropout", "LayerNorm", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "layer_norm_eps", "nn", "return", "self", "su...
yoso/modeling_yoso.py:YosoLayer
[ -0.000056466607929905877, 0.016024259850382805, 0.025212997570633888, 0.008068159222602844, 0.00009629964188206941, 0.040116678923368454, 0.025437112897634506, -0.004426281899213791, 0.005911047104746103, 0.0042301807552576065, 0.009020649828016758, 0.006723465863615274, 0.002983537968248129...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "__init__", "add_cross_attention", "apply_chunking_to_forward", "attention", "attention_mask", "attention_output", "chunk_size_feed_forward", "class", "config", "def", ...
yoso/modeling_yoso.py:YosoEncoder
[ -0.000058727651776280254, 0.01099521853029728, 0.01806357316672802, 0.023785576224327087, -0.0001003629295155406, 0.034107618033885956, 0.00903178658336401, -0.03006855770945549, 0.003786618821322918, 0.008695198222994804, 0.015707455575466156, 0.0004663150757551193, -0.0011780591448768973, ...
[ "BaseModelOutputWithCrossAttentions", "False", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "None", "True", "_", "__init__", "all_hidden_states", "all_self_attentions", "attention_mask", "attentions", "class", "config", "def", "else", "enumerate", "for", "forward", ...
yoso/modeling_yoso.py:YosoPredictionHeadTransform
[ -0.0002919524849858135, 0.041955187916755676, 0.0442478209733963, 0.03209686279296875, -0.0013899088371545076, 0.026136018335819244, 0.029574967920780182, -0.011348534375429153, -0.0021780014503747225, 0.020519066601991653, 0.01662158966064453, 0.00664863595739007, 0.0001226916938321665, 0...
[ "ACT2FN", "LayerNorm", "Linear", "ModelPredictionHeadTransform", "Module", "__init__", "class", "config", "def", "dense", "else", "eps", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "isinstance", "layer_norm_eps", "nn", "return", "self", "str", "super"...
yoso/modeling_yoso.py:YosoLMPredictionHead
[ -0.00033743318635970354, 0.03515048697590828, 0.03377203643321991, 0.018723953515291214, -0.0015794745413586497, 0.024926980957388878, 0.056746214628219604, -0.029292073100805283, 0.00021807517623528838, 0.004077916033565998, 0.019528048112988472, 0.008155832067131996, -0.0002458954695612192...
[ "Linear", "ModelLMPredictionHead", "ModelPredictionHeadTransform", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "forward", "hidden_size", "hidden_states", "nn", "return", "self", "super", "torch", "transform", "vocab_size", "zeros" ]
yoso/modeling_yoso.py:YosoOnlyMLMHead
[ -0.00036667720996774733, 0.01974974200129509, 0.018704168498516083, 0.03159958869218826, -0.0016627541044726968, 0.03694363683462143, 0.04042888432741165, -0.006534841377288103, -0.003775686025619507, -0.006215360015630722, 0.01579979434609413, 0.011210883036255836, -0.002410630462691188, ...
[ "ModelLMPredictionHead", "ModelOnlyMLMHead", "Module", "__init__", "class", "config", "def", "forward", "nn", "prediction_scores", "predictions", "return", "self", "sequence_output", "super" ]
yoso/modeling_yoso.py:YosoPreTrainedModel
[ -0.000320592662319541, 0.03324133902788162, 0.0007307362975552678, -0.0033241338096559048, -0.0014184881001710892, 0.02601994387805462, 0.026249194517731667, -0.002249521669000387, -0.0013826676877215505, -0.001533113420009613, 0.0029086170252412558, 0.0052154515869915485, -0.001676395069807...
[ "Model", "ModelConfig", "ModelEmbeddings", "ModelLMPredictionHead", "ModelPreTrainedModel", "PreTrainedModel", "True", "_init_weights", "arange", "base_model_prefix", "bias", "class", "config", "copy_", "def", "elif", "expand", "if", "init", "isinstance", "module", "no_grad...
yoso/modeling_yoso.py:YosoModel
[ 0.000022142125089885667, 0.041755225509405136, 0.009653089568018913, 0.0030165906064212322, -0.00009470691293245181, 0.038836851716041565, 0.01751025579869747, -0.002904345281422138, 0.007127572316676378, 0.002974498551338911, 0.01526535116136074, 0.019979652017354965, -0.0002648285881150514...
[ "BaseModelOutputWithCrossAttentions", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "batch_size", "buffered_token_type_ids", "buffered_token_type_ids_expanded", "class", "config", ...
yoso/modeling_yoso.py:YosoForMaskedLM
[ -0.00009304386912845075, 0.03673059120774269, 0.01636180840432644, -0.012800054624676704, -0.0004417409945745021, 0.040960174053907394, 0.014803540892899036, -0.010184391401708126, 0.003951320890337229, -0.006149591412395239, 0.022038353607058525, 0.03984712436795235, -0.000744350953027606, ...
[ "CrossEntropyLoss", "MaskedLMOutput", "Model", "ModelForMaskedLM", "ModelModel", "ModelOnlyMLMHead", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "bias", "class", "cls", "config", "decoder", "def", "else", ...
yoso/modeling_yoso.py:YosoClassificationHead
[ -0.00036297834594734013, 0.023115038871765137, 0.031898751854896545, 0.01999450847506523, -0.0014085726579651237, 0.01652725227177143, 0.052471134811639786, 0.0021670348942279816, -0.006645573303103447, 0.00733902445062995, 0.02334618754684925, -0.004131813067942858, -0.0007765208138152957, ...
[ "ACT2FN", "Dropout", "Linear", "ModelClassificationHead", "Module", "__init__", "class", "config", "def", "dense", "dropout", "features", "forward", "hidden_act", "hidden_dropout_prob", "hidden_size", "kwargs", "nn", "num_labels", "out_proj", "return", "self", "super", ...
yoso/modeling_yoso.py:YosoForSequenceClassification
[ -0.00035359952016733587, 0.02811652049422264, -0.0028430831152945757, 0.013029607012867928, -0.0011429479345679283, 0.02811652049422264, 0.02045876905322075, 0.011258037760853767, -0.004971823655068874, 0.011829511262476444, 0.05669021978974342, 0.0021287405397742987, -0.0001812644040910527,...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "MSELoss", "Model", "ModelClassificationHead", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "None", "SequenceClassifierOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "class", "classifie...
yoso/modeling_yoso.py:YosoForMultipleChoice
[ -0.00020023567776661366, 0.054889384657144547, 0.016557540744543076, 0.037197764962911606, -0.0007017108728177845, 0.03991955146193504, 0.03991955146193504, -0.003090362995862961, 0, 0.013325418345630169, 0.03991955146193504, -0.00722974818199873, -0.0008611906087026, -0.012248043902218342...
[ "CrossEntropyLoss", "Linear", "Model", "ModelForMultipleChoice", "ModelModel", "ModelPreTrainedModel", "MultipleChoiceModelOutput", "None", "ReLU", "__init__", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "config", "def", "else", "forward", "hidden_s...
yoso/modeling_yoso.py:YosoForTokenClassification
[ -0.00023566611343994737, 0.0362890399992466, 0.00352967600338161, -0.03424777835607529, -0.0008682435727678239, 0.04127877950668335, 0.036515843123197556, 0.00975267868489027, -0.003657254623249173, 0.0017435748595744371, 0.05851607397198677, 0.028577618300914764, -0.0008717874297872186, -...
[ "CrossEntropyLoss", "Dropout", "Linear", "Model", "ModelForTokenClassification", "ModelModel", "ModelPreTrainedModel", "None", "TokenClassifierOutput", "__init__", "active_labels", "active_logits", "active_loss", "attention_mask", "attentions", "auto_docstring", "class", "classifie...
yoso/modeling_yoso.py:YosoForQuestionAnswering
[ -0.00017493921041022986, 0.0273432619869709, 0.01721612736582756, 0.012546394020318985, -0.0006540440954267979, 0.047710053622722626, 0.039833392947912216, 0.02362998016178608, 0.003966460935771465, 0.029256165027618408, 0.027455786243081093, 0.01710360497236252, 0.001448742812499404, 0.00...
[ "CrossEntropyLoss", "Linear", "Model", "ModelForQuestionAnswering", "ModelModel", "ModelPreTrainedModel", "None", "QuestionAnsweringModelOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "clamp", "class", "config", "def", "dim", "else", "end_logits",...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1RMSNorm
[ -0.000138405041070655, 0.06223258003592491, 0.01964641734957695, 0.06404959410429001, -0.0006920251762494445, 0.0365673191845417, 0.020441358909010887, -0.01817009598016739, 0.002100917510688305, 0.04951351508498192, 0.013230102136731148, -0.008290106430649757, 0.0028106868267059326, 0.006...
[ "ModelYuanMoEV1RMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", ...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1MLP
[ -0.00026396909379400313, 0.04188791662454605, 0.0215225201100111, 0.0361022911965847, -0.0010269482154399157, 0.0587819367647171, 0.0328623428940773, -0.001576582551933825, -0.005554198753088713, -0.0032399494666606188, 0.0210596714168787, -0.0643361359834671, -0.00020340083574410528, 0.01...
[ "ACT2FN", "Linear", "ModelYuanMoEV1MLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1Attention
[ -0.0002120882272720337, 0.05202284827828407, 0.02124076895415783, 0.020218485966324806, -0.0009086960344575346, 0.030668489634990692, 0.036120668053627014, -0.01084755826741457, -0.0021297563798725605, 0.028510337695479393, 0.008916580118238926, 0.014482342638075352, -0.00040110410191118717,...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelYuanMoEV1Attention", "ModelYuanMoEV1RMSNorm", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1Gate
[ -0.00037249596789479256, 0.024190327152609825, -0.013322209008038044, 0.020684482529759407, -0.0014826799742877483, 0.05025043711066246, 0.06123541668057442, -0.030150262638926506, -0.005989150609821081, 0.0024540910962969065, 0.009232057258486748, -0.0387980118393898, -0.001855175942182541,...
[ "Linear", "ModelYuanMoEV1Gate", "Module", "None", "__init__", "bsz", "class", "config", "def", "dtype", "else", "float", "float32", "forward", "hidden_size", "hidden_states", "if", "int", "isinstance", "layer_idx", "logits", "nn", "num_experts", "reshape", "return", ...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1Experts
[ -0.00038211120408959687, 0.04750611260533333, -0.008775759488344193, 0.009477820247411728, -0.0015357579104602337, 0.05312259867787361, 0.06131330877542496, -0.010940447449684143, -0.009828850626945496, -0.02129584364593029, 0.010179881006479263, -0.024104086682200432, -0.0013090508291497827...
[ "ACT2FN", "ModelYuanMoEV1Experts", "Module", "None", "Parameter", "__init__", "act_fn", "cModelk", "class", "config", "continue", "current_hidden_states", "current_state", "def", "dim", "down_proj", "dtype", "empty", "expert_hit", "expert_idx", "expert_mask", "final_hidden_...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1Moe
[ -0.0004211437772028148, 0.041375529021024704, -0.005497034639120102, 0.004373984411358833, -0.0016476326854899526, 0.06951088458299637, 0.05319710820913315, -0.02695320174098015, -0.010343882255256176, -0.018559880554676056, 0.03026324324309826, -0.04255768656730652, -0.002748517319560051, ...
[ "F", "ModelYuanMoEV1Experts", "ModelYuanMoEV1Gate", "ModelYuanMoEV1MLP", "ModelYuanMoEV1Moe", "Module", "None", "True", "__init__", "batch_size", "class", "config", "def", "dim", "dtype", "else", "experts", "final_hidden_states", "float", "forward", "gate", "hidden_dim", ...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1DecoderLayer
[ -0.00024590702378191054, 0.047898415476083755, 0.012202691286802292, 0.025089645758271217, -0.0009016591357067227, 0.04652988910675049, 0.040599606931209564, -0.02144024148583412, 0.003763446817174554, 0.0019957672338932753, 0.004447709769010544, 0.0006949546514078975, -0.0011903325794264674...
[ "False", "GradientCheckpointingLayer", "ModelYuanMoEV1Attention", "ModelYuanMoEV1DecoderLayer", "ModelYuanMoEV1Moe", "ModelYuanMoEV1RMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1PreTrainedModel
[ -0.00040645155240781605, 0.05179038643836975, -0.00788626354187727, 0.016125643625855446, -0.0016846589278429747, 0.049907099455595016, 0.040019843727350235, -0.0032221858855336905, -0.012653333134949207, 0.025424372404813766, -0.0014566046884283423, -0.030368000268936157, -0.001883286749944...
[ "ModelYuanMoEV1Attention", "ModelYuanMoEV1Config", "ModelYuanMoEV1DecoderLayer", "ModelYuanMoEV1Experts", "ModelYuanMoEV1PreTrainedModel", "PreTrainedModel", "ROPE_INIT_FUNCTIONS", "RotaryEmbedding", "True", "_", "__class__", "__name__", "_can_compile_fullgraph", "_can_record_outputs", "...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1RotaryEmbedding
[ -0.00037350651109591126, 0.05527167394757271, 0.004227000288665295, -0.0024778968654572964, -0.0014867380959913135, 0.04267812892794609, 0.03568171337246895, 0, -0.012010511010885239, 0.027869051322340965, 0.004314455669373274, -0.017024608328938484, -0.0008454000926576555, 0.0142260426655...
[ "False", "ModelYuanMoEV1RotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "alpha", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "de...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1Model
[ -0.00029444662504829466, 0.06067036837339401, -0.006003838498145342, 0.021947046741843224, -0.0012065130285918713, 0.03814879432320595, 0.04872014373540878, -0.018499866127967834, 0.001658955356106162, 0.006348556373268366, 0.018384959548711777, -0.016776276752352715, -0.000829477678053081, ...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelYuanMoEV1DecoderLayer", "ModelYuanMoEV1Model", "ModelYuanMoEV1PreTrainedModel", "ModelYuanMoEV1RMSNorm", "ModelYuanMoEV1RotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "at...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1ForCausalLM
[ -0.00038734215195290744, 0.05050652101635933, 0.00486530689522624, 0.023515651002526283, -0.001498688361607492, 0.04008086398243904, 0.04286103695631027, -0.00920933112502098, -0.005183868575841188, 0.02780175395309925, 0.024210693314671516, -0.018186980858445168, 0.001020845607854426, -0....
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelYuanMoEV1ForCausalLM", "ModelYuanMoEV1Model", "ModelYuanMoEV1PreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_...
hunyuan_v1_moe/modeling_hunyuan_v1_moe.py:HunYuanMoEV1ForSequenceClassification
[ -0.0002040421823039651, 0.052234798669815063, -0.007210673298686743, 0.04905528947710991, -0.0004577641957439482, 0.03406617417931557, 0.029069801792502403, -0.011979937553405762, -0.0030233729630708694, 0.007011954206973314, 0.03611014410853386, -0.015556885860860348, 0.00021202643983997405...
[ "GenericForSequenceClassification", "ModelYuanMoEV1ForSequenceClassification", "ModelYuanMoEV1PreTrainedModel", "class", "pass" ]
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTEmbeddings
[ -0.00017891137395054102, 0.02777554839849472, 0.009523045271635056, -0.005470082629472017, -0.0008644430781714618, 0.02720870077610016, 0.01564500294625759, -0.03310391679406166, 0.009466360323131084, -0.0031743482686579227, 0.011677066795527935, 0.0060085877776145935, -0.0014525478472933173...
[ "Conv2d", "Model", "Module", "None", "Parameter", "__init__", "batch_size", "bool_masked_pos", "cat", "class", "cls_token", "config", "def", "dim", "dtype", "embeddings", "empty", "expand", "flatten", "forward", "hidden_size", "if", "is", "kernel_size", "mask_token", ...
dinov3_vit/modeling_dinov3_vit.py:get_patches_center_coordinates
[ -0.000021507541532628238, 0.013247772119939327, -0.0036613040138036013, -0.007434403523802757, 0, -0.0032560452818870544, 0.03219711780548096, -0.021800130605697632, 0.013583158142864704, 0.024930406361818314, 0.001173853175714612, 0.012576998211443424, -0.0026411698199808598, 0.0107882702...
[ "Model_patches_center_coordinates", "arange", "coords", "coords_h", "coords_w", "def", "device", "dim", "dtype", "flatten", "ij", "indexing", "meshgrid", "num_patches_h", "num_patches_w", "return", "stack", "torch" ]
dinov3_vit/modeling_dinov3_vit.py:augment_patches_center_coordinates
[ -0.00011286912922514603, 0.03476369380950928, -0.01828479953110218, -0.03702107444405556, -0.0004832209669984877, 0.009029530920088291, 0.03453795239329338, -0.019187752157449722, 0.013657164759933949, 0.016704631969332695, 0.012359170243144035, 0.011625520884990692, -0.0015237332554534078, ...
[ "Model_patches_center_coordinates", "None", "coords", "def", "device", "dtype", "empty", "exp", "if", "is", "jitter", "jitter_hw", "jitter_range", "log", "not", "np", "rescale", "rescale_hw", "rescale_range", "return", "shift", "shift_hw", "torch", "uniform_" ]
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTRopePositionEmbedding
[ -0.00019626619177870452, 0.04978944733738899, 0.01875629834830761, -0.0005079830880276859, -0.0008383496897295117, 0.01682383194565773, 0.04092283174395561, -0.005626889411360025, 0.008639264851808548, 0.012788385152816772, 0.0152323879301548, 0.0152323879301548, -0.00033391895703971386, -...
[ "False", "Model", "Module", "None", "Tensor", "_", "__init__", "and", "angles", "arange", "augment_patches_center_coordinates", "base", "class", "config", "cos", "cpu", "def", "device", "device_type", "dtype", "else", "enabled", "flatten", "float32", "forward", "get...
dinov3_vit/modeling_dinov3_vit.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
dinov3_vit/modeling_dinov3_vit.py:eager_attention_forward
[ 0.00002095530362566933, 0.025862814858555794, 0.0269921962171793, -0.01146321278065443, 0.00008955634984886274, 0.03704368323087692, 0.060083046555519104, -0.023942869156599045, 0.021458230912685394, 0.014569009654223919, 0.023152301087975502, 0.026879258453845978, 0.003063444746658206, -0...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "shape", "size", "softmax", "tor...
dinov3_vit/modeling_dinov3_vit.py:apply_rotary_pos_emb
[ -0.00007606267899973318, 0.03101942129433155, 0.0052925474010407925, -0.01822674088180065, -0.0005554344388656318, 0.014943663030862808, 0.05637836456298828, 0.007132202852517366, 0.01630217768251896, 0.02671745792031288, 0.004245358519256115, 0.016528597101569176, -0.0011108688777312636, ...
[ "Model_rotary_pos_emb", "cat", "cos", "def", "dim", "k", "k_patches", "k_prefix_tokens", "kwargs", "num_patches", "num_prefix_tokens", "num_tokens", "q", "q_patches", "q_prefix_tokens", "return", "rotate_half", "shape", "sin", "split", "torch" ]
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTAttention
[ -0.00012527724902611226, 0.04675135016441345, 0.026085898280143738, 0.0027243387885391712, -0.0005787455593235791, 0.018406933173537254, 0.037943124771118164, 0.014115745201706886, 0.0011363174999132752, 0.018971562385559082, 0.00525105744600296, 0.012817096896469593, -0.002202056348323822, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "Model", "Module", "None", "Tensor", "_", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "batch_size", "class", "config", "con...
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTLayerScale
[ -0.000092705522547476, 0.03593795746564865, 0.038650255650281906, 0.022602489218115807, -0.000494429434183985, 0.03571193292737007, 0.027575036510825157, -0.015143667347729206, 0.006300443783402443, 0.015256679616868496, 0.0021048567723482847, -0.038876280188560486, 0.002980703255161643, -...
[ "Model", "Module", "Parameter", "__init__", "class", "config", "def", "forward", "hidden_size", "hidden_state", "lambda1", "layerscale_value", "nn", "ones", "return", "self", "super", "torch" ]
dinov3_vit/modeling_dinov3_vit.py:drop_path
[ 0, 0.017753854393959045, 0.03775503486394882, -0.02123720571398735, 0, 0.043598074465990067, 0.0339345820248127, -0.027304979041218758, 0.014832334592938423, 0.0017486985307186842, 0.020113544538617134, -0.04944111406803131, 0.0016574009787291288, -0.018315685912966728, -0.05034004524350...
[ "False", "Model_path", "Model_prob", "def", "device", "div", "dtype", "floor_", "if", "input", "keep_prob", "ndim", "not", "or", "output", "rand", "random_tensor", "return", "shape", "torch", "training" ]
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTDropPath
[ -0.00015407476166728884, 0.015301218256354332, 0.040123194456100464, -0.0190415158867836, -0.0008465257124044001, 0.03150917589664459, 0.042390041053295135, -0.025162002071738243, 0.0022101758513599634, -0.0017993098590523005, 0.013204384595155716, -0.04375014826655388, -0.001430947217158973...
[ "Model", "Module", "None", "__init__", "class", "def", "drop_path", "drop_prob", "extra_repr", "f", "forward", "hidden_states", "nn", "p", "return", "self", "super", "training" ]
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTMLP
[ -0.0002036199439316988, 0.02377709373831749, 0.03360800817608833, 0.01943320222198963, -0.0009645155514590442, 0.0402381606400013, 0.04206716641783714, -0.004429626744240522, -0.0018147181253880262, 0.002086211461573839, 0.01771850697696209, -0.04069541022181511, -0.0004965469124726951, 0....
[ "ACT2FN", "Linear", "Model", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTGatedMLP
[ -0.00018016117974184453, 0.02960021421313286, 0.019618745893239975, 0.014513283036649227, -0.0008640566375106573, 0.035795606672763824, 0.047956936061382294, 0.002208543010056019, 0.002495366847142577, 0.003671344369649887, 0.009809372946619987, -0.04061424732208252, -0.00117597752250731, ...
[ "ACT2FN", "Linear", "Model", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTLayer
[ -0.0001223125000251457, 0.03108673170208931, 0.014079135842621326, 0.006757985334843397, -0.0002851024910341948, 0.03852051496505737, 0.0203865896910429, -0.00799694936722517, 0.0038013667799532413, -0.007321150507777929, 0.02477927878499031, 0.0008799459901638329, 0.0012178452452644706, 0...
[ "GradientCheckpointingLayer", "Identity", "LayerNorm", "Model", "ModelAttention", "ModelDropPath", "ModelGatedMLP", "ModelMLP", "ModelScale", "None", "Tensor", "_", "__init__", "attention", "attention_mask", "class", "config", "def", "drop_path", "drop_path_rate", "else", "...
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTPreTrainedModel
[ -0.00014444244152400643, 0.03900650516152382, -0.0014091945486143231, 0.015895714983344078, -0.0006200455827638507, 0.037202734500169754, 0.020517872646450996, -0.012682750821113586, 0.002184251556172967, 0.012288176454603672, 0.006905053276568651, -0.004283951595425606, -0.00023956307268235...
[ "Conv2d", "LayerNorm", "Linear", "Model", "ModelAttention", "ModelConfig", "ModelEmbeddings", "ModelLayer", "ModelLayerScale", "ModelRopePositionEmbedding", "None", "PreTrainedModel", "True", "_can_record_outputs", "_init_weights", "_no_split_modules", "_supports_attention_backend", ...
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTModel
[ -0.00006599888729397207, 0.032001156359910965, 0.0010559821967035532, 0.023273568600416183, -0.0001870697014965117, 0.03177737444639206, 0.029763314872980118, -0.0043358211405575275, 0.007049206178635359, 0.01588868722319603, 0.02640654891729355, 0.008503803983330727, -0.0014126384630799294,...
[ "BaseModelOutputWithPooling", "False", "LayerNorm", "Model", "ModelEmbeddings", "ModelLayer", "ModelPreTrainedModel", "ModelRopePositionEmbedding", "ModuleList", "None", "_", "__init__", "auto_docstring", "bool_masked_pos", "check_model_inputs", "class", "config", "def", "dtype",...
dinov3_vit/modeling_dinov3_vit.py:DINOv3ViTBackbone
[ -0.00011473087215563282, 0.017376035451889038, 0.006978439632803202, 0.04170248284935951, -0.0002942715655080974, 0.02735324203968048, 0.018160758540034294, -0.0014993835939094424, 0.0016114871250465512, 0.017263932153582573, 0.015246069058775902, -0.006165689788758755, -0.001135047408752143...
[ "BackboneMixin", "BackboneOutput", "False", "LayerNorm", "Model", "ModelEmbeddings", "ModelLayer", "ModelPreTrainedModel", "ModelRopePositionEmbedding", "ModuleList", "None", "_", "__init__", "_init_backbone", "append", "apply_layernorm", "batch_size", "can_return_tuple", "check_...
owlv2/modeling_owlv2.py:contrastive_loss
[ 0.00008122165309032425, 0.037291333079338074, 0.037291333079338074, -0.0017656880663707852, 0.00034784057061187923, 0.061926212161779404, 0.017854638397693634, 0.025877924636006355, 0.023052824661135674, -0.006780242547392845, 0.030059074983000755, -0.031415123492479324, 0.002161202253773808...
[ "Model_loss", "arange", "cross_entropy", "def", "device", "functional", "len", "logits", "nn", "return", "torch" ]
owlv2/modeling_owlv2.py:owlv2_loss
[ 0.00016533616872038692, 0.03712017834186554, 0.07967258244752884, -0.018786432221531868, 0.0008841506205499172, 0.04391045495867729, -0.004498558584600687, -0.012165912427008152, 0.026595251634716988, 0.01935228891670704, 0.033272355794906616, -0.005517099983990192, 0.0037346521858125925, ...
[ "Model_loss", "caption_loss", "contrastive_loss", "def", "image_loss", "return", "similarity", "t" ]
owlv2/modeling_owlv2.py:Owlv2Output
[ -0.00009819567640079185, 0.01683354377746582, 0.015711307525634766, 0.0037594917230308056, -0.0005225412896834314, 0.030075933784246445, 0.04623613506555557, -0.01363517064601183, 0.016272425651550293, 0.004965895786881447, 0.02379141002893448, 0.021210266277194023, -0.0035911560989916325, ...
[ "ModelOutput", "None", "class", "def", "else", "for", "getattr", "if", "image_embeds", "in", "k", "keys", "logits_per_image", "logits_per_text", "loss", "not", "r", "return", "self", "text_embeds", "text_model_output", "to_tuple", "tuple", "vision_model_output" ]
owlv2/modeling_owlv2.py:_upcast
[ -0.0003424626193009317, 0.023878658190369606, 0.021110117435455322, -0.023763300850987434, -0.001405899180099368, 0.011074160225689411, 0.013438954949378967, -0.032530345022678375, -0.003085768548771739, 0.042681656777858734, -0.0220329649746418, -0.04314308241009712, -0.004700750112533569, ...
[ "_upcast", "def", "dtype", "else", "float", "float32", "float64", "if", "in", "int", "int64", "is_floating_point", "return", "t", "torch" ]
owlv2/modeling_owlv2.py:box_area
[ 0.00032100427779369056, -0.029818546026945114, 0.015613648109138012, -0.008863386698067188, 0.0017682750476524234, 0.04202771559357643, 0.03357521444559097, 0.007982918061316013, 0.029231566935777664, 0.017844170331954956, 0.004461042582988739, 0.011974376626312733, -0.002920221770182252, ...
[ "Model_area", "Modeles", "_upcast", "def", "return" ]
owlv2/modeling_owlv2.py:box_iou
[ 0.00011689333041431382, 0.012335369363427162, 0.0266124177724123, -0.011421638540923595, 0.0008530535851605237, 0.04088946431875229, 0.02706928178668022, -0.028896745294332504, 0.029010960832238197, -0.00947995949536562, 0.05528073012828827, -0.014105723239481449, -0.00174179975874722, -0....
[ "Model_area", "Model_iou", "Modeles1", "Modeles2", "None", "area1", "area2", "clamp", "def", "inter", "iou", "left_top", "max", "min", "return", "right_bottom", "torch", "union", "width_height" ]
owlv2/modeling_owlv2.py:generalized_box_iou
[ 0.00010878764442168176, -0.007103921379894018, 0.031472355127334595, -0.038038529455661774, 0.0005908141611143947, 0.023208029568195343, -0.005688797682523727, -0.023208029568195343, 0.02207593061029911, -0.01267950888723135, 0.025132598355412483, -0.01879284344613552, 0.00026356682064943016...
[ "Model_box_iou", "None", "ValueError", "all", "area", "be", "bottom_right", "box_iou", "boxes1", "boxes2", "but", "clamp", "corner", "def", "f", "format", "got", "if", "in", "iou", "max", "min", "must", "not", "raise", "return", "top_left", "torch", "union", ...
owlv2/modeling_owlv2.py:Owlv2ObjectDetectionOutput
[ -0.0002072642819257453, 0.008576452732086182, 0.001222144579514861, -0.0017581728752702475, -0.0010792036773636937, 0.04688460752367973, 0.059005994349718094, -0.028588175773620605, 0.014751498587429523, 0.00520304823294282, 0.017152905464172363, 0.03293357789516449, -0.004574108403176069, ...
[ "ModelObjectDetectionOutput", "ModelOutput", "None", "class", "class_embeds", "def", "else", "for", "getattr", "if", "image_embeds", "in", "k", "keys", "logits", "loss", "loss_dict", "not", "objectness_logits", "pred_boxes", "r", "return", "self", "text_embeds", "text...
owlv2/modeling_owlv2.py:Owlv2ImageGuidedObjectDetectionOutput
[ -0.00023544378927908838, 0.002286141272634268, -0.016563739627599716, -0.001955441664904356, -0.0011933945352211595, 0.052911948412656784, 0.05889330059289932, -0.025420740246772766, 0.014493272639811039, 0.009489643387496471, 0.007045341189950705, 0.03496789559721947, -0.006211402826011181,...
[ "ModelImageGuidedObjectDetectionOutput", "ModelOutput", "None", "class", "class_embeds", "def", "else", "for", "getattr", "if", "image_embeds", "in", "k", "keys", "logits", "not", "query_image_embeds", "query_pred_boxes", "r", "return", "self", "target_pred_boxes", "text_...
owlv2/modeling_owlv2.py:Owlv2VisionEmbeddings
[ -0.00014045908756088465, 0.0182034969329834, 0.01202329806983471, 0.023035289719700813, -0.0006461117882281542, 0.006657760590314865, 0.029440224170684814, -0.008427545428276062, 0.0021068863570690155, 0.021237414330244064, 0.02427132986485958, 0.03685646504163742, -0.0005934396176598966, ...
[ "Conv2d", "Embedding", "False", "ModelVisionEmbeddings", "Module", "Parameter", "_", "__init__", "align_corners", "and", "arange", "batch_size", "bicubic", "cat", "class", "class_embedding", "class_embeds", "class_pos_embed", "config", "def", "dim", "else", "embed_dim", ...