Columns:
  identifier: string, lengths 24 to 102 characters
  embedding: list of floats, fixed length 2,560
  tokens: list of strings, lengths 4 to 448 items
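Each record below pairs a `path:ClassName` identifier from a Transformers modeling file with a fixed-width embedding and an alphabetically sorted, deduplicated list of tokens from that class's source, with the model's own name masked as `Model` (e.g. `Owlv2TextEmbeddings` appears as `ModelTextEmbeddings`). A minimal sketch of loading and sanity-checking rows against this schema; the file name `code_embeddings.jsonl` and the JSON Lines layout are assumptions, not part of the dataset:

```python
import json

# Validate each row against the column schema above. The file name and
# on-disk format are hypothetical; adapt to wherever the dataset lives.
with open("code_embeddings.jsonl") as f:
    for line in f:
        row = json.loads(line)
        assert 24 <= len(row["identifier"]) <= 102   # e.g. "gpt2/modeling_gpt2.py:GPT2Model"
        assert len(row["embedding"]) == 2560         # fixed-width float vector
        assert 4 <= len(row["tokens"]) <= 448        # sorted, deduplicated source tokens
        module_path, class_name = row["identifier"].split(":", 1)
```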
owlv2/modeling_owlv2.py:Owlv2TextEmbeddings
[ -0.00015322833496611565, 0.022334136068820953, 0.015531862154603004, -0.00901301484555006, -0.0009282271494157612, 0.038319483399391174, 0.039679937064647675, -0.002139882417395711, 0.012527523562312126, -0.01655220240354538, 0.02618875913321972, 0.029249783605337143, -0.0005066277808509767,...
[ "Embedding", "False", "ModelTextEmbeddings", "Module", "None", "__init__", "arange", "class", "config", "def", "else", "embeddings", "expand", "forward", "hidden_size", "if", "input_ids", "inputs_embeds", "is", "max_position_embeddings", "nn", "not", "persistent", "posi...
owlv2/modeling_owlv2.py:Owlv2Attention
[ -0.0000686713246977888, 0.04031050205230713, 0.03941471502184868, -0.013436834327876568, -0.00029393075965344906, 0.02049117349088192, 0.026985643431544304, -0.016908016055822372, 0.001847564708441496, 0.029337089508771896, 0.01713196374475956, 0.006382496561855078, -0.001847564708441496, ...
[ "Attention", "False", "Linear", "ModelAttention", "Module", "None", "ValueError", "__init__", "_shape", "and", "attention_dropout", "attention_mask", "attn_output", "attn_probs", "attn_weights", "attn_weights_reshaped", "be", "bmm", "bsz", "but", "by", "causal_attention_mas...
owlv2/modeling_owlv2.py:Owlv2MLP
[ -0.00016017387679312378, 0.027564143761992455, 0.04351034387946129, 0.019363241270184517, -0.0006371361087076366, 0.04783859848976135, 0.03325921669602394, -0.028019750490784645, 0.005125564057379961, -0.009169064462184906, 0.04282693564891815, -0.03348701819777489, 0.0012315592030063272, ...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "activation_fn", "class", "config", "def", "fc1", "fc2", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
owlv2/modeling_owlv2.py:Owlv2EncoderLayer
[ -0.00003186961112078279, 0.025657452642917633, 0.03781098499894142, 0.016092175617814064, -0.00009099322778638452, 0.04748879745602608, 0.025882519781589508, -0.01744256727397442, 0.007652223110198975, 0.010296741500496864, 0.009677811525762081, 0.02149374410510063, -0.00014945748262107372, ...
[ "False", "GradientCheckpointingLayer", "LayerNorm", "ModelAttention", "ModelEncoderLayer", "ModelMLP", "__init__", "attention_mask", "attn_weights", "causal_attention_mask", "class", "config", "def", "embed_dim", "eps", "forward", "hidden_size", "hidden_states", "if", "layer_no...
owlv2/modeling_owlv2.py:Owlv2PreTrainedModel
[ -0.00018734551849775016, 0.04863913729786873, 0.005712270736694336, 0.021378597244620323, -0.0008943097200244665, 0.03868508338928223, 0.016175340861082077, -0.0043266210705041885, 0.0037610495928674936, 0.015835998579859734, 0.009897499345242977, 0.004354899749159813, -0.004298342391848564,...
[ "LayerNorm", "Linear", "Model", "ModelAttention", "ModelConfig", "ModelEncoderLayer", "ModelForObjectDetection", "ModelMLP", "ModelModel", "ModelPreTrainedModel", "ModelTextEmbeddings", "ModelVisionEmbeddings", "None", "PreTrainedModel", "True", "_init_weights", "_no_split_modules", ...
owlv2/modeling_owlv2.py:Owlv2Encoder
[ -0.00005490372495842166, 0.022488566115498543, 0.024962307885289192, 0.03328307718038559, -0.00009355595102533698, 0.03778079152107239, 0.009726304560899734, -0.025974294170737267, 0.008376991376280785, 0.0032748975791037083, 0.01326825376600027, 0.002825126051902771, -0.00174286391120404, ...
[ "BaseModelOutput", "False", "ModelEncoder", "ModelEncoderLayer", "Module", "ModuleList", "None", "_", "__init__", "all_attentions", "attention_mask", "attentions", "causal_attention_mask", "class", "config", "def", "else", "encoder_layer", "encoder_states", "for", "forward", ...
owlv2/modeling_owlv2.py:Owlv2TextTransformer
[ -0.00015208779950626194, 0.05761402100324631, 0.036233820021152496, 0.016654053702950478, -0.0007454939186573029, 0.027794264256954193, 0.02284305915236473, -0.014966142363846302, 0.010746365413069725, 0.009958673268556595, 0.04028480499982834, 0.018116910010576248, 0.0006259335787035525, ...
[ "BaseModelOutputWithPooling", "LayerNorm", "ModelEncoder", "ModelTextEmbeddings", "ModelTextTransformer", "Module", "None", "__init__", "_create_4d_causal_attention_mask", "_prepare_4d_attention_mask", "arange", "argmax", "attention_mask", "attentions", "auto_docstring", "causal_attent...
owlv2/modeling_owlv2.py:Owlv2TextModel
[ 0.00013635653886012733, 0.0595213808119297, 0.029984453693032265, -0.0010419038590043783, 0.0004510254948399961, 0.01991504803299904, 0.03132704272866249, -0.004139645025134087, 0.0025313368532806635, 0.002237645909190178, 0.02237645909190178, 0.007719878107309341, -0.003971821162849665, 0...
[ "ModelPreTrainedModel", "ModelTextConfig", "ModelTextModel", "ModelTextTransformer", "None", "__init__", "attention_mask", "auto_docstring", "class", "config", "def", "embeddings", "forward", "get_input_embeddings", "input_ids", "input_modalities", "kwargs", "output_attentions", ...
owlv2/modeling_owlv2.py:Owlv2VisionTransformer
[ -0.00016215104551520199, 0.05910757929086685, 0.019627325236797333, 0.043315477669239044, -0.0004617779632098973, 0.018386518582701683, 0.04760190472006798, -0.026846572756767273, 0.003666023490950465, 0.028425782918930054, 0.026282567530870438, 0.010716068558394909, -0.0005640035960823298, ...
[ "BaseModelOutputWithPooling", "False", "LayerNorm", "ModelEncoder", "ModelVisionEmbeddings", "ModelVisionTransformer", "Module", "None", "__init__", "attentions", "auto_docstring", "class", "config", "def", "dtype", "else", "embeddings", "encoder", "encoder_outputs", "eps", "...
owlv2/modeling_owlv2.py:Owlv2VisionModel
[ 0.00005382284871302545, 0.04906543344259262, 0.0014912867918610573, 0.03562285006046295, 0.0005531063652597368, 0.01131417602300644, 0.04794522002339363, -0.00918576680123806, 0.005209001712501049, 0.030245816335082054, 0.020836006850004196, 0.006833313964307308, -0.0007911521242931485, 0....
[ "False", "ModelPreTrainedModel", "ModelVisionConfig", "ModelVisionModel", "ModelVisionTransformer", "None", "__init__", "auto_docstring", "class", "config", "def", "embeddings", "forward", "get_input_embeddings", "image", "input_modalities", "interpolate_pos_encoding", "kwargs", ...
owlv2/modeling_owlv2.py:Owlv2Model
[ -0.000054212348914006725, 0.05395282432436943, 0.02664199471473694, 0.01950773596763611, -0.00018549767264630646, 0.03923841938376427, 0.02318633906543255, -0.008527668192982674, 0.006103134714066982, 0.018838899210095406, 0.041021984070539474, 0.015940608456730843, -0.002145851030945778, ...
[ "False", "Linear", "ModelConfig", "ModelModel", "ModelOutput", "ModelPreTrainedModel", "ModelTextConfig", "ModelTextTransformer", "ModelVisionConfig", "ModelVisionTransformer", "Model_loss", "None", "Parameter", "True", "TypeError", "__init__", "attention_mask", "auto_docstring", ...
owlv2/modeling_owlv2.py:Owlv2BoxPredictionHead
[ -0.000302949600154534, -0.008040638640522957, 0.015967225655913353, 0.022810323163866997, -0.0009551823022775352, 0.023722736164927483, 0.04858598858118057, -0.03444358706474304, -0.0022525193635374308, 0.02007308416068554, 0.0010193362832069397, 0.015967225655913353, -0.003378779161721468, ...
[ "GELU", "Linear", "ModelBoxPredictionHead", "Module", "__init__", "class", "config", "def", "dense0", "dense1", "dense2", "forward", "gelu", "hidden_size", "image_features", "nn", "out_dim", "output", "return", "self", "super", "vision_config", "width" ]
owlv2/modeling_owlv2.py:Owlv2ClassPredictionHead
[ -0.0003084673953708261, 0.030071984976530075, 0.02123403549194336, 0.03305622935295105, -0.001370169222354889, 0.017905456945300102, 0.03305622935295105, -0.005451981909573078, -0.0019081936916336417, 0.028350306674838066, 0.021807927638292313, 0.026054736226797104, -0.002855116967111826, ...
[ "ELU", "Linear", "ModelClassPredictionHead", "Module", "None", "True", "__init__", "batch_size", "class", "config", "def", "dense0", "device", "dim", "dtype", "einsum", "elu", "finfo", "float32", "forward", "hidden_size", "if", "image_class_embeds", "image_embeds", "i...
owlv2/modeling_owlv2.py:Owlv2ForObjectDetection
[ -0.00018469935457687825, 0.021718524396419525, -0.010067440569400787, 0.01300849113613367, -0.0008872639155015349, 0.03416142612695694, 0.04343704879283905, -0.013574077747762203, 0.009501853957772255, 0.01708071306347847, 0.014026546850800514, 0.04569939523935318, -0.004355016630142927, -...
[ "False", "LayerNorm", "Model", "ModelBoxPredictionHead", "ModelClassPredictionHead", "ModelConfig", "ModelForObjectDetection", "ModelImageGuidedObjectDetectionOutput", "ModelModel", "ModelObjectDetectionOutput", "ModelPreTrainedModel", "None", "Sigmoid", "True", "_", "__init__", "all...
hubert/modeling_hubert.py:HubertPositionalConvEmbedding
[ -0.00022518663899973035, 0.015326988883316517, 0.011952763423323631, -0.0076634944416582584, -0.0010151270544156432, 0.02104601450264454, 0.015098227187991142, -0.02013096958398819, 0.011380860581994057, -0.00903606042265892, 0.024477429687976837, 0.017271457239985466, 0.0031454639974981546,...
[ "ACT2FN", "BatchNorm1d", "Conv1d", "GatheredParameters", "ModelPositionalConvEmbedding", "ModelSamePadLayer", "Module", "None", "__init__", "activation", "batch_norm", "class", "config", "conv", "conv_pos_batch_norm", "deepspeed", "def", "dim", "else", "feat_extract_activation"...
hubert/modeling_hubert.py:HubertSamePadLayer
[ -0.00006862095324322581, 0.02618260867893696, 0.03647662326693535, -0.001608440070413053, -0.0004895252641290426, 0.007440784014761448, 0.009734558872878551, -0.04184741526842117, 0.01812642067670822, 0.0049232253804802895, -0.008112132549285889, -0.0022797889541834593, 0.002447626320645213,...
[ "ModelSamePadLayer", "Module", "__init__", "class", "def", "else", "forward", "hidden_states", "if", "nn", "num_conv_pos_embeddings", "num_pad_remove", "return", "self", "super" ]
hubert/modeling_hubert.py:HubertNoLayerNormConvLayer
[ 0.000045173001126386225, 0.0243696216493845, 0.017374638468027115, 0.00046539207687601447, 0.00048302055802196264, 0.023467043414711952, 0.009871953167021275, -0.03474927693605423, 0.016584880650043488, 0.0016359236324205995, 0.02876969240605831, 0.00823602918535471, 0.004118014592677355, ...
[ "ACT2FN", "Conv1d", "GradientCheckpointingLayer", "ModelNoLayerNormConvLayer", "__init__", "activation", "class", "config", "conv", "conv_dim", "conv_kernel", "conv_stride", "def", "else", "feat_extract_activation", "forward", "hidden_states", "if", "in_conv_dim", "kernel_size"...
hubert/modeling_hubert.py:HubertLayerNormConvLayer
[ -0.000011282235391263384, 0.01938651315867901, 0.025022126734256744, 0.010425886139273643, 0.000192843668628484, 0.03246113657951355, 0.011553009040653706, -0.028741631656885147, 0.016906842589378357, 0.009298763237893581, 0.030883165076375008, 0.015216158702969551, 0.00300096464343369, -0...
[ "ACT2FN", "Conv1d", "GradientCheckpointingLayer", "LayerNorm", "ModelLayerNormConvLayer", "True", "__init__", "activation", "class", "config", "conv", "conv_dim", "conv_kernel", "conv_stride", "def", "elementwise_affine", "else", "feat_extract_activation", "forward", "hidden_st...
hubert/modeling_hubert.py:HubertGroupNormConvLayer
[ -0.000032619624107610434, 0.008350623771548271, -0.005811582785099745, 0.008237777277827263, 0.00018690162687562406, 0.031371261924505234, 0.009761202149093151, -0.023472024127840996, 0.013146590441465378, 0.0025390409864485264, 0.03746496140956879, 0.00668614124879241, 0.001227203174494207,...
[ "ACT2FN", "Conv1d", "GradientCheckpointingLayer", "GroupNorm", "ModelGroupNormConvLayer", "True", "__init__", "activation", "affine", "class", "config", "conv", "conv_dim", "conv_kernel", "conv_stride", "def", "else", "feat_extract_activation", "forward", "hidden_states", "if...
hubert/modeling_hubert.py:HubertFeatureEncoder
[ -0.00006962888437556103, 0.04106517508625984, -0.016358381137251854, 0.0315885990858078, 0, 0.02978353388607502, -0.0009307354339398444, -0.03948574513196945, 0.007671516388654709, 0.007953557185828686, 0.02211201749742031, 0.007333067245781422, 0.003172961762174964, -0.022676100954413414,...
[ "False", "ModelFeatureEncoder", "ModelGroupNormConvLayer", "ModelLayerNormConvLayer", "ModelNoLayerNormConvLayer", "Module", "ModuleList", "None", "True", "ValueError", "__init__", "_freeze_parameters", "_requires_grad", "and", "be", "but", "class", "config", "conv_layer", "con...
hubert/modeling_hubert.py:HubertFeatureProjection
[ -0.00017397853662259877, 0.04566892236471176, 0.034590817987918854, 0.028260471299290657, -0.00039388032746501267, 0.020347539335489273, 0.02769526280462742, -0.03346039727330208, 0.0011657444993034005, 0.022495334967970848, 0.017747577279806137, 0.012038961052894592, 0.002741265809163451, ...
[ "Dropout", "LayerNorm", "Linear", "ModelFeatureProjection", "Module", "__init__", "class", "config", "conv_dim", "def", "dropout", "eps", "feat_proj_dropout", "feat_proj_layer_norm", "forward", "hidden_size", "hidden_states", "if", "layer_norm", "layer_norm_eps", "nn", "pro...
hubert/modeling_hubert.py:eager_attention_forward
[ 0.00002095530362566933, 0.025862814858555794, 0.0269921962171793, -0.01146321278065443, 0.00008955634984886274, 0.03704368323087692, 0.060083046555519104, -0.023942869156599045, 0.021458230912685394, 0.014569009654223919, 0.023152301087975502, 0.026879258453845978, 0.003063444746658206, -0...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "shape", "size", "softmax", "tor...
hubert/modeling_hubert.py:HubertAttention
[ -0.00012168648390797898, 0.04424449801445007, 0.04559892416000366, -0.005925602745264769, -0.0006137231248430908, 0.02426675334572792, 0.03656943142414093, -0.019526271149516106, -0.001551943481899798, 0.028894366696476936, 0.01636595092713833, 0.0032872986048460007, -0.0034989272244274616, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelAttention", "Module", "None", "ValueError", "__init__", "_attn_implementation", "and", "attention_interface", "attention_mask", "attn_output", "attn_weights", "be", "bsz", "by", "class", "config", "contiguous", "current_stat...
hubert/modeling_hubert.py:HubertFeedForward
[ -0.0002094466472044587, 0.0419820211827755, 0.04266650974750519, 0.003279845230281353, -0.0009910836815834045, 0.02372896857559681, 0.04106936603784561, -0.029661210253834724, 0.0015543615445494652, -0.02099101059138775, 0.033768147230148315, -0.008556118234992027, 0.002581095788627863, -0...
[ "ACT2FN", "Dropout", "Linear", "ModelFeedForward", "Module", "__init__", "activation_dropout", "class", "config", "def", "else", "forward", "hidden_act", "hidden_dropout", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_dense", "intermediate_dropout",...
hubert/modeling_hubert.py:HubertEncoderLayer
[ -0.00005393683386500925, 0.037269916385412216, 0.045128028839826584, 0.0073529500514268875, -0.00036834910861216486, 0.035922810435295105, 0.028962764889001846, -0.020543357357382774, 0.007633597124367952, 0.002904695924371481, 0.0014663803158327937, 0.029636317864060402, 0.00146638031583279...
[ "Dropout", "False", "GradientCheckpointingLayer", "LayerNorm", "ModelAttention", "ModelEncoderLayer", "ModelFeedForward", "None", "_", "__init__", "attention", "attention_dropout", "attention_mask", "attn_residual", "attn_weights", "class", "config", "def", "dropout", "embed_di...
hubert/modeling_hubert.py:HubertEncoder
[ -0.00017824147653300315, 0.02780921757221222, 0.024063486605882645, 0.0031498195603489876, -0.000865490990690887, 0.04767294600605965, 0.0047956714406609535, -0.020204247906804085, 0.004767294507473707, 0.00042210420360788703, 0.029511824250221252, 0.016912544146180153, 0.0014614027459174395...
[ "BaseModelOutput", "Dropout", "False", "LayerNorm", "ModelEncoder", "ModelEncoderLayer", "ModelPositionalConvEmbedding", "Module", "ModuleList", "None", "True", "_", "__init__", "all_hidden_states", "all_self_attentions", "and", "attention_mask", "attentions", "class", "config"...
hubert/modeling_hubert.py:HubertAttnAdapterLayer
[ -0.00011129819176858291, 0.046888962388038635, 0.06102295592427254, -0.0012970182579010725, -0.0004767418431583792, 0.027370590716600418, 0.02860451117157936, -0.03432541340589523, -0.0002243491035187617, 0.026921892538666725, -0.010263971984386444, 0.008188742212951183, 0.000329512753523886...
[ "LayerNorm", "Linear", "ModelAttnAdapterLayer", "Module", "ReLU", "__init__", "act_fn", "adapter_attn_dim", "class", "config", "def", "forward", "hidden_dim", "hidden_size", "hidden_states", "input_dim", "linear_1", "linear_2", "nn", "norm", "return", "self", "super" ]
hubert/modeling_hubert.py:HubertEncoderLayerStableLayerNorm
[ -0.00017194294196087867, 0.03516853600740433, 0.053546931594610214, 0.01951286569237709, -0.0008295804145745933, 0.03289959952235222, 0.01985320635139942, -0.014124138280749321, 0.004112449940294027, 0.020760780200362206, -0.007998005487024784, 0.025865891948342323, 0.0006984074134379625, ...
[ "Dropout", "False", "GradientCheckpointingLayer", "LayerNorm", "ModelAttention", "ModelAttnAdapterLayer", "ModelEncoderLayerStableLayerNorm", "ModelFeedForward", "None", "_", "__init__", "adapter_attn_dim", "adapter_layer", "attention", "attention_dropout", "attention_mask", "attn_re...
hubert/modeling_hubert.py:HubertEncoderStableLayerNorm
[ -0.00016121646331157535, 0.018481429666280746, 0.03424166515469551, 0.02358366549015045, -0.0006873844540677965, 0.04104464873671532, -0.0027778835501521826, -0.006746288854628801, 0.0027637106832116842, 0.016667300835251808, 0.020068790763616562, 0.03038664534687996, 0.0010346198687329888, ...
[ "BaseModelOutput", "Dropout", "False", "LayerNorm", "ModelEncoderLayerStableLayerNorm", "ModelEncoderStableLayerNorm", "ModelPositionalConvEmbedding", "Module", "ModuleList", "None", "True", "_", "__init__", "all_hidden_states", "all_self_attentions", "and", "attention_mask", "atte...
hubert/modeling_hubert.py:HubertPreTrainedModel
[ -0.0002040661493083462, 0.04342103749513626, -0.005738587584346533, 0.003957646433264017, -0.0009258065838366747, 0.02227589674293995, 0.016961341723799706, -0.042290281504392624, 0.00024028567713685334, -0.015943661332130432, 0.028042752295732498, 0.02295435033738613, 0.0010812855325639248,...
[ "BatchNorm1d", "Conv1d", "GatheredParameters", "GroupNorm", "LayerNorm", "Linear", "Model", "ModelConfig", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "_conv_out_length", "_get_feat_extract_output_lengths", "_get_feature_...
hubert/modeling_hubert.py:_compute_mask_indices
[ 0, -0.008906038478016853, 0.00551726296544075, -0.014171244576573372, 0, 0.024309566244482994, -0.0006616514874622226, -0.05265205353498459, 0.013331051915884018, -0.0200525913387537, 0.040777336806058884, 0.030695026740431786, -0.006189417093992233, -0.012434846721589565, 0.005489256698...
[ "False", "None", "ValueError", "_", "_compute_mask_indices", "and", "append", "arange", "array", "attention_mask", "batch_size", "be", "bigger", "bool", "broadcast_to", "but", "choice", "compute_num_masked_span", "concatenate", "def", "detach", "dtype", "dummy_mask_idx", ...
hubert/modeling_hubert.py:HubertModel
[ -0.00008806395635474473, 0.0302834864705801, 0.007010066416114569, 0.0063931806944310665, -0.00028215517522767186, 0.030732130631804466, 0.02074979618191719, -0.03454560786485672, 0.007290469016879797, -0.02254437282681465, 0.026470011100172997, 0.019291702657938004, -0.002425482962280512, ...
[ "BaseModelOutput", "ModelEncoder", "ModelEncoderStableLayerNorm", "ModelFeatureEncoder", "ModelFeatureProjection", "ModelModel", "ModelPreTrainedModel", "None", "Parameter", "Tensor", "True", "__init__", "_compute_mask_indices", "_get_feature_vector_attention_mask", "_mask_hidden_states"...
hubert/modeling_hubert.py:HubertForCTC
[ -0.0003292037872597575, 0.04213808476924896, 0.0308552086353302, -0.00840459018945694, -0.0011513137724250555, 0.02567429654300213, 0.018305888399481773, -0.053651221096515656, -0.005641437601298094, -0.01726970635354519, 0.03338810056447983, -0.0027775445487350225, -0.00006611059507122263, ...
[ "Cannot", "CausalLMOutput", "Dropout", "False", "Linear", "Model", "ModelForCTC", "ModelModel", "ModelPreTrainedModel", "None", "Please", "True", "ValueError", "You", "_HIDDEN_STATES_START_POSITION", "__class__", "__init__", "_freeze_parameters", "_get_feat_extract_output_lengths...
hubert/modeling_hubert.py:HubertForSequenceClassification
[ -0.0003542772028595209, 0.03962179273366928, 0.0051244948990643024, 0.028055893257260323, -0.0011737669119611382, 0.011852183379232883, 0.006985344458371401, -0.0030059884302318096, -0.011737669818103313, -0.00260519003495574, 0.027712350711226463, -0.0109360720962286, -0.0018035931279882789...
[ "CrossEntropyLoss", "False", "Linear", "Model", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "None", "Parameter", "Sequence", "SequenceClassifierOutput", "True", "ValueError", "_HIDDEN_STATES_START_POSITION", "__init__", "_freeze_parameters", "_get_feature_...
convbert/modeling_convbert.py:ConvBertEmbeddings
[ -0.0002331148280063644, 0.017766553908586502, 0.011616592295467854, -0.004043029621243477, -0.0011388816637918353, 0.03986085578799248, 0.03279979154467583, -0.0021781111136078835, 0.007801339030265808, -0.011958257295191288, 0.024030402302742004, 0.02437206730246544, -0.0011459996458142996,...
[ "Dropout", "Embedding", "False", "LayerNorm", "ModelEmbeddings", "Module", "None", "__init__", "arange", "buffered_token_type_ids", "buffered_token_type_ids_expanded", "class", "config", "def", "device", "dropout", "dtype", "else", "embedding_size", "embeddings", "eps", "ex...
convbert/modeling_convbert.py:ConvBertPreTrainedModel
[ -0.00024528676294721663, 0.03344431519508362, -0.01638088934123516, -0.008304201066493988, -0.0009953666012734175, 0.03230675309896469, 0.012911326251924038, -0.015243327245116234, 0.0016423547640442848, -0.002687489613890648, 0.010920592583715916, -0.010636202991008759, -0.00335580715909600...
[ "GroupedLinearLayer", "Model", "ModelConfig", "ModelEmbeddings", "ModelPreTrainedModel", "PreTrainedModel", "SeparableModel1D", "True", "_init_weights", "arange", "base_model_prefix", "bias", "class", "config", "copy_", "def", "elif", "expand", "if", "init", "initializer_rang...
convbert/modeling_convbert.py:SeparableConv1D
[ -0.00009951653919415548, 0.01555628515779972, 0.008172686211764812, -0.01668355241417885, -0.0002835781197063625, 0.028519855812191963, 0.03607254475355148, -0.047570668160915375, 0.016458097845315933, -0.002733622444793582, 0.02558896131813526, -0.020741712301969528, 0.004199069458991289, ...
[ "Conv1d", "ModelConv1D", "Module", "Parameter", "__init__", "bias", "class", "config", "data", "def", "depthwise", "forward", "groups", "hidden_states", "initializer_range", "input_filters", "kernel_size", "kwargs", "mean", "nn", "normal_", "output_filters", "padding", ...
convbert/modeling_convbert.py:ConvBertSelfAttention
[ -0.00002413633410469629, 0.0444880910217762, 0.023816855624318123, -0.0296587273478508, 0, 0.021794669330120087, 0.014323816634714603, -0.029209351167082787, 0.006796791683882475, 0.0148293636739254, 0.014211473055183887, 0.003819684498012066, 0.0016500474885106087, -0.025951385498046875, ...
[ "Dropout", "False", "Linear", "ModelSelfAttention", "Model_attn_layer", "Model_kernel_layer", "Model_kernel_size", "Model_out", "Model_out_layer", "Module", "None", "SeparableModel1D", "The", "Unfold", "ValueError", "_", "__init__", "a", "all_head_size", "and", "attention", ...
convbert/modeling_convbert.py:ConvBertSelfOutput
[ -0.00012168083776487038, 0.04875698313117027, 0.03859927877783775, 0.020879726856946945, -0.0006630723946727812, 0.05620596557855606, 0.02370131015777588, -0.01952536590397358, 0.0035551965702325106, 0.017155233770608902, 0.01365646906197071, 0.003512872848659754, 0.0036962758749723434, -0...
[ "Dropout", "LayerNorm", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
convbert/modeling_convbert.py:ConvBertAttention
[ 0.00019809554214589298, 0.039166320115327835, 0.04459979757666588, 0.002589391777291894, 0.0007110214792191982, 0.043467823415994644, 0.03893992304801941, -0.023205477744340897, 0.01596084050834179, -0.0032544266432523727, 0.01901717111468315, 0.02501663751900196, 0.003226127475500107, -0....
[ "False", "ModelAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "__init__", "attention_mask", "attention_output", "class", "config", "def", "encoder_hidden_states", "forward", "hidden_states", "nn", "output", "output_attentions", "outputs", "return", "se...
convbert/modeling_convbert.py:GroupedLinearLayer
[ -0.0000661963495076634, -0.013803912326693535, 0.0017675741109997034, 0.02457769773900509, -0.0000946914660744369, 0.03523925319314003, 0.01245718915015459, -0.031872447580099106, 0.008248679339885712, 0.009595402516424656, 0.011503259651362896, -0.024802150204777718, -0.00006225087417988107...
[ "ModelLinearLayer", "Module", "Parameter", "__init__", "batch_size", "bias", "class", "def", "empty", "forward", "group_in_dim", "group_out_dim", "hidden_states", "input_size", "list", "matmul", "nn", "num_groups", "output_size", "permute", "reshape", "return", "self", ...
convbert/modeling_convbert.py:ConvBertIntermediate
[ -0.00023661409795749933, 0.01212602760642767, 0.014801723882555962, 0.02459363453090191, -0.0006724821869283915, 0.03711817041039467, 0.022885741665959358, -0.02436591498553753, -0.0027183934580534697, -0.006632310803979635, 0.02618766576051712, -0.02539064921438694, -0.002149096457287669, ...
[ "ACT2FN", "GroupedLinearLayer", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "input_size", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "num_...
convbert/modeling_convbert.py:ConvBertOutput
[ -0.0002158506104024127, 0.027401478961110115, 0.022739816457033157, 0.03956728056073189, -0.0008776148315519094, 0.0452522374689579, 0.022853516042232513, -0.025468595325946808, -0.0004583494446706027, 0.009550723247230053, 0.013132244348526001, -0.0052017332054674625, 0.0005613892572000623,...
[ "Dropout", "GroupedLinearLayer", "LayerNorm", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "else", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "if", "input_size", "input_tensor", "intermediate_size...
convbert/modeling_convbert.py:ConvBertLayer
[ -0.000057372850278625265, 0.021526644006371498, 0.020517582073807716, 0.005129395518451929, -0.00009854115342022851, 0.04125940054655075, 0.020741818472743034, -0.0015065846964716911, 0.005549837835133076, 0.0025226534344255924, 0.006699046585708857, 0.01317385770380497, 0.002606741851195693...
[ "AttributeError", "False", "GradientCheckpointingLayer", "If", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "True", "__init__", "add_cross_attention", "and", "apply_chunking_to_forward", "are", "attention", "attention_mask", "attention_output", "be"...
convbert/modeling_convbert.py:ConvBertEncoder
[ -0.00006999672041274607, 0.022765222936868668, 0.01983504556119442, 0.025470001623034477, -0.00029935705242678523, 0.03854310140013695, 0.010368319228291512, -0.022990621626377106, 0.005606781225651503, 0.008903230540454388, 0.019158851355314255, 0.0034795852843672037, -0.0010847290977835655...
[ "BaseModelOutputWithCrossAttentions", "False", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "None", "True", "_", "__init__", "add_cross_attention", "all_cross_attentions", "all_hidden_states", "all_self_attentions", "and", "attention_mask", "attentions", "class", "config...
convbert/modeling_convbert.py:ConvBertPredictionHeadTransform
[ -0.0002919524849858135, 0.041955187916755676, 0.0442478209733963, 0.03209686279296875, -0.0013899088371545076, 0.026136018335819244, 0.029574967920780182, -0.011348534375429153, -0.0021780014503747225, 0.020519066601991653, 0.01662158966064453, 0.00664863595739007, 0.0001226916938321665, 0...
[ "ACT2FN", "LayerNorm", "Linear", "ModelPredictionHeadTransform", "Module", "__init__", "class", "config", "def", "dense", "else", "eps", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "isinstance", "layer_norm_eps", "nn", "return", "self", "str", "super"...
convbert/modeling_convbert.py:ConvBertSequenceSummary
[ -0.00037931749830022454, 0.022525623440742493, 0.020658213645219803, 0.04575152322649956, -0.0016193939372897148, 0.025326736271381378, 0.020074648782610893, -0.01207980327308178, -0.0034576246980577707, 0.00682771485298872, 0.02917826734483242, 0.020658213645219803, 0.0016704559093341231, ...
[ "Dropout", "Identity", "Linear", "ModelSequenceSummary", "Module", "None", "NotImplementedError", "__init__", "activation", "activation_string", "and", "attn", "class", "cls_index", "config", "def", "dim", "dtype", "elif", "else", "expand", "first", "first_dropout", "fo...
convbert/modeling_convbert.py:ConvBertModel
[ -0.000022111014914116822, 0.03317790850996971, 0.017821915447711945, 0.005744485650211573, -0.00018914770043920726, 0.03766140714287758, 0.012946109287440777, -0.0021716956980526447, 0.00022242368140723556, 0.009079089388251305, 0.017934003844857216, 0.007902170531451702, 0.00012434709060471...
[ "Linear", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "and", "attention_mask", "auto_docstring", "batch_size", "buffered_token_type_ids", "buffered_token_type_ids_expanded", "class", "config", "def", "device", "dtype", "elif", "e...
convbert/modeling_convbert.py:ConvBertGeneratorPredictions
[ -0.00027219104231335223, 0.05099165439605713, 0.014882179908454418, 0.05606774613261223, -0.0013050748966634274, 0.01891998015344143, 0.04845361039042473, -0.006460481323301792, -0.0008760585333220661, 0.00046506812213920057, 0.022265587002038956, 0.011075111106038094, -0.001355547341518104,...
[ "LayerNorm", "Linear", "ModelGeneratorPredictions", "Module", "__init__", "activation", "class", "config", "def", "dense", "embedding_size", "eps", "forward", "gelu", "generator_hidden_states", "get_activation", "hidden_size", "hidden_states", "layer_norm_eps", "nn", "return"...
convbert/modeling_convbert.py:ConvBertForMaskedLM
[ -0.00012761284597218037, 0.04788453131914139, 0.010236997157335281, -0.010236997157335281, -0.0006817672401666641, 0.03848663344979286, 0.020473994314670563, 0.0013984969118610024, 0.004503159783780575, -0.007551883347332478, 0.015886925160884857, 0.032221369445323944, -0.0007516920450143516...
[ "CrossEntropyLoss", "Linear", "MaskedLMOutput", "Model", "ModelForMaskedLM", "ModelGeneratorPredictions", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "class", "config", "def", "else", "embedding...
convbert/modeling_convbert.py:ConvBertClassificationHead
[ -0.0003871475928463042, 0.02466166391968727, 0.03774508088827133, 0.012678179889917374, -0.0013676803791895509, 0.016788456588983536, 0.049554891884326935, 0.0021709210705012083, -0.006020687986165285, 0.009262597188353539, 0.02443009987473488, -0.0017656824784353375, -0.0010058601619675756,...
[ "ACT2FN", "Dropout", "Linear", "ModelClassificationHead", "Module", "None", "__init__", "class", "classifier_dropout", "config", "def", "dense", "dropout", "else", "forward", "hidden_act", "hidden_dropout_prob", "hidden_size", "hidden_states", "if", "is", "kwargs", "nn", ...
convbert/modeling_convbert.py:ConvBertForSequenceClassification
[ -0.0003538649471011013, 0.029510192573070526, -0.0027451340574771166, 0.012696245685219765, -0.0011938473908230662, 0.02562125213444233, 0.019902221858501434, 0.009150447323918343, -0.005375887732952833, 0.011724010109901428, 0.056732773780822754, 0.0017014112090691924, -0.000271653902018442...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "MSELoss", "Model", "ModelClassificationHead", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "None", "SequenceClassifierOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "class", "classifie...
convbert/modeling_convbert.py:ConvBertForMultipleChoice
[ -0.0002208053192589432, 0.05085084214806557, 0.016798939555883408, 0.03768410533666611, -0.0008974095690064132, 0.03904618322849274, 0.04086228460073471, -0.00760492542758584, 0.0009364273282699287, 0.009534533135592937, 0.03723008185625076, -0.00576044712215662, -0.0012060048757120967, -0...
[ "CrossEntropyLoss", "Linear", "Model", "ModelForMultipleChoice", "ModelModel", "ModelPreTrainedModel", "ModelSequenceSummary", "MultipleChoiceModelOutput", "None", "__init__", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "config", "def", "else", "forwa...
convbert/modeling_convbert.py:ConvBertForTokenClassification
[ -0.0002618406433612108, 0.038759537041187286, 0.001560356467962265, -0.032603610306978226, -0.0010758622083812952, 0.03625156730413437, 0.04081151634454727, 0.015047821216285229, -0.003149212570860982, 0.009632885456085205, 0.05563133955001831, 0.021317746490240097, -0.0008264901698566973, ...
[ "CrossEntropyLoss", "Dropout", "Linear", "Model", "ModelForTokenClassification", "ModelModel", "ModelPreTrainedModel", "None", "TokenClassifierOutput", "__init__", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "classifier_dropout", "config", "def", "dro...
convbert/modeling_convbert.py:ConvBertForQuestionAnswering
[ -0.00019604063709266484, 0.02790634147822857, 0.012771652080118656, 0.013503068126738071, -0.000604824919719249, 0.04726073890924454, 0.03803364187479019, 0.02498067542910576, 0.003277307143434882, 0.031057056039571762, 0.024643098935484886, 0.020479653030633926, 0.0010127300629392266, 0.0...
[ "CrossEntropyLoss", "Linear", "Model", "ModelForQuestionAnswering", "ModelModel", "ModelPreTrainedModel", "None", "QuestionAnsweringModelOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "clamp", "class", "config", "contiguous", "def", "dim", "else",...
granitemoehybrid/modeling_granitemoehybrid.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
granitemoehybrid/modeling_granitemoehybrid.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
granitemoehybrid/modeling_granitemoehybrid.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
granitemoehybrid/modeling_granitemoehybrid.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridAttention
[ -0.00006807147292420268, 0.03260403871536255, 0.033053748309612274, -0.005902455188333988, -0.0002916094090323895, 0.03282889351248741, 0.041598256677389145, -0.01113034412264824, 0.0016020949697121978, 0.010118494741618633, 0.0158523079007864, 0.02327253855764866, -0.00099076924379915, -0...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attention_multiplier", "attn_output", "attn_weights", "cache_kwargs", ...
granitemoehybrid/modeling_granitemoehybrid.py:HybridMambaAttentionDynamicCache
[ -0.00007371895480901003, -0.017187047749757767, 0.030779417604207993, -0.03167808800935745, 0, 0.02325306460261345, -0.02235439419746399, -0.059761498123407364, 0.0017341505736112595, 0.031004084274172783, 0.00268196570686996, 0.010784029960632324, 0.0025134652387350798, 0.0021062558516860...
[ "Any", "False", "ModelMambaAttentionDynamicCache", "None", "_", "__getitem__", "__init__", "__len__", "append", "batch_size", "beam_idx", "cache_kwargs", "cache_position", "cat", "class", "config", "conv_kernel_size", "conv_states", "def", "device", "dim", "dtype", "else"...
granitemoehybrid/modeling_granitemoehybrid.py:pad_tensor_by_size
[ -0.000021551304598688148, 0.01097853947430849, 0.013430598191916943, -0.0059072342701256275, 0.00009230053547071293, 0.023963307961821556, 0.0064645204693078995, -0.05394531041383743, 0.006074420176446438, 0.019616475328803062, -0.012538940645754337, -0.05060159042477608, 0.00058166752569377...
[ "Model", "Model_shape", "Model_size", "Model_tensor_by_size", "constant", "def", "else", "functional", "if", "input_tensor", "len", "mode", "nn", "return", "shape", "torch", "value" ]
granitemoehybrid/modeling_granitemoehybrid.py:reshape_into_chunks
[ -0.00011610210640355945, -0.015866706147789955, 0.012905665673315525, -0.01273806020617485, 0.00019728628103621304, 0.014414120465517044, 0.00466503482311964, -0.048494018614292145, 0.006508701480925083, 0.0032683180179446936, -0.010503312572836876, -0.02704044245183468, 0.001829699380323290...
[ "Model", "Model_into_chunks", "chunk_size", "def", "else", "if", "input_tensor", "len", "pad_size", "pad_tensor_by_size", "return", "shape" ]
granitemoehybrid/modeling_granitemoehybrid.py:segment_sum
[ 0.0000940322206588462, 0.008414141833782196, 0.02875296212732792, 0.0057394481264054775, 0.0003012513625435531, 0.02875296212732792, 0.0039005957078188658, -0.02696983329951763, 0.01281624287366867, 0.013874975964426994, 0.022177673876285553, -0.0011632133973762393, 0.0012050054501742125, ...
[ "Model_sum", "None", "bool", "chunk_size", "cumsum", "def", "device", "diagonal", "dim", "dtype", "expand", "inf", "input_tensor", "mask", "masked_fill", "ones", "return", "size", "tensor_segsum", "torch", "tril" ]
granitemoehybrid/modeling_granitemoehybrid.py:apply_mask_to_padding_states
[ 0.00005794858589069918, -0.0029697560239583254, 0.05510082468390465, -0.006776063237339258, 0.00009846902685239911, -0.00038864766247570515, -0.003457744140177965, -0.06960104405879974, 0.019296443089842796, 0.0010666025336831808, 0.03480052202939987, 0.028777355328202248, 0.0040712147019803...
[ "Model_mask_to_padding_states", "None", "and", "attention_mask", "def", "hidden_states", "if", "is", "not", "return", "shape" ]
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridMambaLayer
[ -0.000266497052507475, 0.0038317954167723656, 0.009046494029462337, -0.006309497635811567, -0.0009723540861159563, 0.05946485325694084, 0.010083671659231186, -0.03895178437232971, 0.013713793829083443, 0.011869922280311584, 0.01751677878201008, -0.02028258517384529, 0.002146381651982665, 0...
[ "A", "ACT2FN", "A_cumsum", "A_log", "B", "B_decay", "C", "C_reshaped", "C_times_states", "Conv1d", "D", "D_residual", "Falling", "False", "G", "G_intermediate", "L", "Linear", "M", "M_intermediate", "ModelMambaLayer", "ModelRMSNormGated", "Module", "None", "NotImpleme...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridRMSNormGated
[ -0.0001462102954974398, 0.01472088135778904, -0.003109643468633294, 0.04450498893857002, -0.0004778580623678863, 0.03469106927514076, 0.02613241598010063, -0.03377814590930939, 0.011012132279574871, 0.02430657111108303, 0.02533360943198204, -0.016204381361603737, 0.0021111341193318367, 0.0...
[ "ModelRMSNormGated", "Module", "None", "Parameter", "True", "__init__", "class", "def", "eps", "float32", "forward", "functional", "gate", "hidden_size", "hidden_states", "if", "is", "keepdim", "mean", "nn", "not", "ones", "pow", "return", "rsqrt", "self", "silu",...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridMLP
[ -0.00028635814669542015, 0.034821148961782455, 0.0458173044025898, 0.015119710005819798, -0.0010810019448399544, 0.04535913094878197, 0.019472353160381317, -0.032301198691129684, 0.0011096377857029438, 0.009850719943642616, 0.04238100349903107, -0.04261009022593498, 0.0009020281722769141, ...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "activation", "chunk", "chunked_hidden_states", "class", "config", "def", "dim", "forward", "hidden_act", "hidden_size", "hidden_states", "input_linear", "input_size", "nn", "output_linear", "return", "self", "shared_i...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridParallelExperts
[ -0.00028040233883075416, 0.01748853363096714, -0.01657409965991974, 0.039777837693691254, -0.0010001611663028598, 0.04755052179098129, 0.039777837693691254, -0.013030671514570713, -0.007058280520141125, -0.0056294784881174564, 0.009087178856134415, -0.035434283316135406, -0.00141451368108391...
[ "F", "ModelParallelExperts", "Module", "Parameter", "__init__", "append", "cat", "class", "def", "dim", "empty", "expert_size", "for", "forward", "i", "in", "input_list", "input_size", "inputs", "linear", "nn", "num_experts", "output_list", "output_size", "range", "...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridTopKGating
[ -0.0002463419805280864, 0.011824414134025574, -0.0191932525485754, 0.006254944019019604, -0.0009996485896408558, 0.031988754868507385, 0.047526147216558456, -0.01456630788743496, -0.0018850516062229872, -0.008511293679475784, 0.0095966262742877, -0.0413568876683712, -0.001842209487222135, ...
[ "Linear", "ModelTopKGating", "Module", "_", "__init__", "batch_gates", "batch_index", "class", "def", "device", "dim", "div", "dtype", "expert_size", "flatten", "float", "forward", "gates", "hidden_states", "index_sorted_experts", "input_size", "layer", "logits", "long"...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridMoE
[ -0.0004193915519863367, 0.02967888116836548, -0.0038724434562027454, 0.009577646851539612, -0.0016480055637657642, 0.0699995905160904, 0.043276771903038025, -0.02447620779275894, -0.004108928609639406, -0.0036950798239558935, 0.024594450369477272, -0.04398622736334801, 0.00038244074676185846...
[ "ACT2FN", "ModelMoE", "ModelParallelExperts", "ModelTopKGating", "Module", "None", "_", "__init__", "activation", "batch_gates", "batch_index", "bsz", "chunk", "chunked_hidden_states", "class", "config", "def", "device", "dim", "dtype", "emb_size", "expert_inputs", "exper...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteFlashAttentionKwargs
[ -0.00025716997333802283, 0.0005216355784796178, 0.008988182060420513, -0.026264168322086334, -0.0012986172223463655, 0.015058123506605625, 0.06536860018968582, -0.040621913969516754, 0.021128064021468163, 0.01949384994804859, 0.020194226875901222, 0.0037061660550534725, 0.0005252833943814039...
[ "False", "IntTensor", "LongTensor", "ModelFlashAttentionKwargs", "TypedDict", "class", "cu_seq_lens_k", "cu_seq_lens_q", "int", "max_length_k", "max_length_q", "seq_idx", "torch", "total" ]
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridRMSNorm
[ -0.00009884802420856431, 0.04179859161376953, 0.03230918198823929, 0.053095508366823196, -0.00046952810953371227, 0.03931327164173126, 0.022028988227248192, -0.02937198430299759, 0.007964326068758965, 0.042476408183574677, 0.019995542243123055, 0.006975846365094185, 0.0028524715453386307, ...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridDecoderLayer
[ -0.0002448446466587484, 0.037698980420827866, 0.006841456517577171, -0.008062130771577358, -0.0007309854845516384, 0.04882699251174927, 0.025321904569864273, -0.05177932232618332, -0.0002448446466587484, -0.01016282755881548, 0.005109801422804594, 0.002412961795926094, -0.0016181038226932287...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelMambaLayer", "ModelMoE", "ModelRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "auto_docstring", "block_sparse_moe", "cache_params", "cache_position", "class", "config",...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridPreTrainedModel
[ -0.00030663562938570976, 0.02696959115564823, -0.00791873037815094, 0.00854993425309658, -0.0012121970066800714, 0.04682379961013794, 0.017558924853801727, -0.011591185815632343, -0.0037872190587222576, -0.001936646061949432, 0.007402291987091303, -0.013771705329418182, -0.002668268047273159...
[ "A_log", "D", "False", "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelMambaLayer", "ModelParallelExperts", "ModelPreTrainedModel", "ModelRMSNormGated", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_is_stateful", "_no_split...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridModel
[ -0.0001622071285964921, 0.03449073061347008, -0.0008615591796115041, -0.010551441460847855, -0.0006913746474310756, 0.060812607407569885, 0.011856188997626305, -0.010608169250190258, 0.007658304180949926, -0.012763840146362782, 0.02496039867401123, -0.012536927126348019, -0.00012232013978064...
[ "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "MoeModelOutputWithPast", "None", "True", "ValueError", "You", "__init__", "_update_mamba_mask", "all", "and", "arange", "attention_mask", "aut...
granitemoehybrid/modeling_granitemoehybrid.py:load_balancing_loss_func
[ -0.00027690583374351263, 0.01840798556804657, 0.001643570140004158, -0.028812499716877937, -0.0009432663209736347, 0.05808233842253685, 0.038874007761478424, -0.011547867208719254, 0, -0.02252405695617199, 0.031556546688079834, -0.0061455233953893185, -0.0008396499906666577, 0.014349081553...
[ "Model_balancing_loss_func", "None", "_", "attention_mask", "batch_size", "cat", "compute_device", "concatenated_gate_logits", "def", "device", "dim", "else", "expand", "expert_attention_mask", "expert_mask", "float", "for", "functional", "gate_logits", "if", "in", "is", ...
granitemoehybrid/modeling_granitemoehybrid.py:GraniteMoeHybridForCausalLM
[ -0.00042586459312587976, 0.03261321783065796, -0.005416124127805233, -0.008328018710017204, -0.0015578636666759849, 0.050550490617752075, 0.029701324179768562, -0.007163260597735643, -0.006842952221632004, 0.0024459913838654757, 0.028536567464470863, -0.0036253088619560003, 0.001368590514175...
[ "False", "GenerationMixin", "HybridMambaAttentionDynamicCache", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "MoeCausalLMOutputWithPast", "None", "True", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "and", "attention_mask", "attentions", "auto_do...
gpt2/modeling_gpt2.py:eager_attention_forward
[ -0.000011272472875134554, 0.03738809749484062, 0.017117442563176155, -0.01813097484409809, -0.00019267680181656033, 0.032658278942108154, 0.05292893573641777, -0.017680516466498375, 0.018806664273142815, 0.02117157354950905, 0.020270656794309616, 0.03423488512635231, 0.0022945257369428873, ...
[ "Model_attention_forward", "None", "attention_mask", "attn_dropout", "attn_output", "attn_weights", "bias", "causal_mask", "def", "device", "dim", "dtype", "finfo", "float", "full", "functional", "if", "is", "is_cross_attention", "key", "key_length", "kwargs", "layer_idx"...
gpt2/modeling_gpt2.py:GPT2Attention
[ -0.0001785278582246974, 0.04593050852417946, 0.03069613128900528, -0.006508716847747564, -0.0009166206000372767, 0.036835357546806335, 0.028422344475984573, -0.0132448123767972, 0.0023874768521636724, 0.016939716413617134, 0.012960589490830898, 0.014495396055281162, 0.002160098170861602, -...
[ "ALL_ATTENTION_FUNCTIONS", "Conv1D", "Dropout", "EncoderDecoderCache", "False", "If", "Model", "Module", "None", "Please", "True", "ValueError", "_", "__init__", "_attn_implementation", "_upcast_and_reordered_attn", "alpha", "and", "as", "attention", "attention_interface", ...
gpt2/modeling_gpt2.py:GPT2MLP
[ -0.00018339804955758154, 0.04053723067045212, 0.03755991905927658, -0.0044373380951583385, -0.0005868737353011966, 0.02759738080203533, 0.026795797049999237, -0.024620069190859795, 0.008702908642590046, -0.014428505674004555, 0.026108724996447563, -0.023474950343370438, 0.0014170854119583964...
[ "ACT2FN", "Conv1D", "Dropout", "Model", "Module", "__init__", "act", "activation_function", "c_fc", "c_proj", "class", "config", "def", "dropout", "embed_dim", "forward", "hidden_size", "hidden_states", "intermediate_size", "nn", "resid_pdrop", "return", "self", "super"...
gpt2/modeling_gpt2.py:GPT2Block
[ -0.00005699498797184788, 0.031201688572764397, 0.015039662830531597, 0.004713924136012793, -0.0001946598058566451, 0.04309873282909393, 0.029854852706193924, 0.010437974706292152, 0.004124683327972889, 0.009652320295572281, 0.011504219844937325, 0.012626582756638527, 0.0007365506608039141, ...
[ "False", "GradientCheckpointingLayer", "If", "LayerNorm", "Model", "ModelAttention", "ModelMLP", "None", "True", "ValueError", "__init__", "add_cross_attention", "are", "attention", "attention_mask", "attn", "attn_output", "be", "by", "cache_position", "class", "config", ...
gpt2/modeling_gpt2.py:GPT2SequenceSummary
[ -0.00035416518221609294, 0.03277934715151787, 0.028478512540459633, 0.03812633454799652, -0.0015183696523308754, 0.026502452790737152, 0.01917940564453602, -0.008950389921665192, -0.0032110975589603186, -0.00807859841734171, 0.03510412573814392, 0.012088838033378124, 0.0032256273552775383, ...
[ "Dropout", "Identity", "Linear", "Model", "Module", "None", "NotImplementedError", "__init__", "activation", "activation_string", "and", "attn", "class", "cls_index", "config", "def", "dim", "dtype", "elif", "else", "expand", "first", "first_dropout", "forward", "full...
gpt2/modeling_gpt2.py:GPT2PreTrainedModel
[ -0.00010925993410637602, 0.044033948332071304, 0.0007757893763482571, 0.002513417275622487, -0.0003124219656456262, 0.015501746907830238, 0.018534697592258453, -0.01831003464758396, 0.0004879399493802339, -0.00025801139418035746, 0.007526210509240627, 0.008874188177287579, 0.0011303357314318...
[ "Conv1D", "Embedding", "False", "LayerNorm", "Linear", "Model", "ModelAttention", "ModelBlock", "ModelConfig", "None", "PreTrainedModel", "True", "_can_compile_fullgraph", "_init_weights", "_is_hf_initialized", "_no_split_modules", "_skip_keys_device_placement", "_supports_attentio...
gpt2/modeling_gpt2.py:GPT2DoubleHeadsModelOutput
[ -0.00011762732901843265, 0.016301555559039116, 0.021395791321992874, 0.013980847783386707, -0.0008278134046122432, 0.07245136052370071, 0.04935748875141144, -0.0021084477193653584, 0.013754437677562237, -0.02445233426988125, 0.023207075893878937, 0.017660018056631088, -0.0036791705060750246,...
[ "Model", "ModelOutput", "None", "attentions", "class", "hidden_states", "logits", "loss", "mc_logits", "mc_loss", "past_key_values", "r" ]
gpt2/modeling_gpt2.py:GPT2Model
[ -0.00006692701572319493, 0.02575596608221531, 0.006047052796930075, -0.0025615987833589315, -0.00042693314026109874, 0.02642786130309105, 0.03449060022830963, -0.0065229786559939384, 0.0067189475521445274, -0.004087360110133886, 0.01388582494109869, 0.007278860080987215, -0.00090985751012340...
[ "BaseModelOutputWithPastAndCrossAttentions", "Dropout", "DynamicCache", "Embedding", "EncoderDecoderCache", "False", "LayerNorm", "Model", "ModelBlock", "ModelPreTrainedModel", "ModuleList", "None", "Setting", "True", "_", "__init__", "_attn_implementation", "_prepare_4d_attention_...
gpt2/modeling_gpt2.py:GPT2LMHeadModel
[ -0.00020926549041178077, 0.041642073541879654, 0.021833952516317368, -0.010804430581629276, -0.000706930470187217, 0.02971218340098858, 0.041642073541879654, -0.010860702954232693, 0.005542897619307041, 0.015869006514549255, 0.016881922259926796, 0.013336718082427979, 0.0003095019201282412, ...
[ "CausalLMOutputWithCrossAttentions", "GenerationMixin", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "cache_position", "class", "config", "cross_attentions", "def", "else", "en...
gpt2/modeling_gpt2.py:GPT2DoubleHeadsModel
[ -0.00017114703950937837, 0.04246553033590317, 0.013649635016918182, 0.00988615583628416, -0.0008811878506094217, 0.051677629351615906, 0.03505091369152069, 0.0007512916345149279, -0.002078339457511902, 0.009043585509061813, 0.029209095984697342, 0.026063499972224236, -0.0012919408036395907, ...
[ "CrossEntropyLoss", "GenerationMixin", "Linear", "Model", "ModelModel", "ModelOutput", "ModelPreTrainedModel", "ModelSequenceSummary", "None", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "cache_position", "class", "config", "contiguous", "d...
gpt2/modeling_gpt2.py:GPT2ForSequenceClassification
[ -0.00036455370718613267, 0.03728455305099487, 0.008863658644258976, 0.006090191658586264, -0.0012652158038690686, 0.026305053383111954, 0.02321706898510456, -0.010750760324299335, -0.001107957330532372, -0.009321138262748718, 0.034997157752513885, 0.026533791795372963, 0.002101544989272952, ...
[ "BCEWithLogitsLoss", "Cannot", "CrossEntropyLoss", "Linear", "MSELoss", "Model", "ModelModel", "ModelPreTrainedModel", "None", "Results", "SequenceClassifierOutputWithPast", "ValueError", "__class__", "__init__", "__name__", "and", "arange", "argmax", "attention_mask", "attenti...
gpt2/modeling_gpt2.py:GPT2ForTokenClassification
[ -0.00024608205421827734, 0.04237568378448486, 0.01960158534348011, -0.0233406163752079, -0.0006975275464355946, 0.028325991705060005, 0.04101603478193283, 0.006005110219120979, -0.002266079420223832, 0.0069681936874985695, 0.046001408249139786, 0.013993039727210999, 0.0004921641084365547, ...
[ "CrossEntropyLoss", "Dropout", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "TokenClassifierOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "classifier_dropout", "config", "def", "device", "dropout", "el...
gpt2/modeling_gpt2.py:GPT2ForQuestionAnswering
[ -0.00023138936376199126, 0.0312004704028368, 0.018539410084486008, 0.00983493123203516, -0.0006747384322807193, 0.04973987862467766, 0.033461373299360275, 0.02272208221256733, 0.003165265079587698, 0.029843928292393684, 0.01910463534295559, 0.02758302353322506, 0.0011092558270320296, 0.001...
[ "CrossEntropyLoss", "Linear", "Model", "ModelModel", "ModelPreTrainedModel", "None", "QuestionAnsweringModelOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "clamp", "class", "config", "contiguous", "def", "device", "dim", "else", "end_logits", ...
arcee/modeling_arcee.py:ArceeMLP
[ -0.0001993271434912458, 0.01876356638967991, 0.033865947276353836, 0.027458878234028816, -0.0007579794037155807, 0.05903658643364906, 0.03249300643801689, -0.005091333296149969, -0.0006685950793325901, -0.005949423648416996, 0.03340829908847809, -0.0469089150428772, -0.001308587146922946, ...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
arcee/modeling_arcee.py:ArceeRMSNorm
[ -0.00009884802420856431, 0.04179859161376953, 0.03230918198823929, 0.053095508366823196, -0.00046952810953371227, 0.03931327164173126, 0.022028988227248192, -0.02937198430299759, 0.007964326068758965, 0.042476408183574677, 0.019995542243123055, 0.006975846365094185, 0.0028524715453386307, ...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
arcee/modeling_arcee.py:ArceeRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
arcee/modeling_arcee.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
arcee/modeling_arcee.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
arcee/modeling_arcee.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
arcee/modeling_arcee.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
arcee/modeling_arcee.py:ArceeAttention
[ -0.00009043229511007667, 0.03348980098962784, 0.028994524851441383, -0.007192440330982208, -0.0003863127203658223, 0.030792634934186935, 0.04225558787584305, -0.007361013442277908, 0.003174788085743785, 0.009102932177484035, 0.014047735370695591, 0.028320234268903732, -0.0009271505405195057,...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache_position", "cla...