identifier (stringlengths 24–102) | embedding (listlengths 2.56k–2.56k) | tokens (listlengths 4–448)
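Each row below pairs a class or function identifier with a fixed-length embedding and a sorted token list. As a minimal loading sketch, assuming this dump comes from a Hugging Face dataset (the `org/transformers-class-embeddings` path is a hypothetical placeholder, not a confirmed source):

```python
# Minimal sketch: load one row of the identifier/embedding/tokens schema.
# The dataset path is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("org/transformers-class-embeddings", split="train")
row = ds[0]
print(row["identifier"])      # e.g. "pix2struct/modeling_pix2struct.py:Pix2StructTextModel"
print(len(row["embedding"]))  # 2560 floats, per the listlengths stat above
print(row["tokens"][:8])      # sorted name tokens from the definition's source
```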
pix2struct/modeling_pix2struct.py:Pix2StructTextModel
[ 0.0001007683458738029, 0.04801982641220093, 0.029705287888646126, -0.011502423323690891, 0.0002861646644305438, 0.030598679557442665, 0.018872907385230064, -0.02702511101961136, 0.007873018272221088, 0.004774064291268587, 0.01820286363363266, 0.012675000354647636, 0.00021985820785630494, 0...
[ "AttentionMaskConverter", "BlockMask", "CausalLMOutputWithCrossAttentions", "CrossEntropyLoss", "Dropout", "DynamicCache", "Embedding", "EncoderDecoderCache", "False", "Linear", "ModelLayerNorm", "ModelPreTrainedModel", "ModelTextBlock", "ModelTextConfig", "ModelTextModel", "ModuleList...
pix2struct/modeling_pix2struct.py:Pix2StructForConditionalGeneration
[ -0.00011900139361387119, 0.038976456969976425, -0.003556041745468974, 0.031136365607380867, -0.0005565065075643361, 0.012208143249154091, 0.011704137548804283, -0.038976456969976425, -0.006720078643411398, 0.009240108542144299, 0.0224002618342638, 0.0030100352596491575, -0.002786032622680068...
[ "BaseModelOutput", "GenerationMixin", "ModelConfig", "ModelForConditionalGeneration", "ModelPreTrainedModel", "ModelTextModel", "ModelVisionModel", "None", "Seq2SeqLMOutput", "__init__", "_shift_right", "and", "attention_mask", "attentions", "auto_docstring", "cache_position", "class...
sam_hq/modeling_sam_hq.py:SamHQVisionEncoderOutput
[ -0.00009729940938996151, 0.036517657339572906, 0.029304293915629387, 0.043054770678281784, -0.0005846770363859832, 0.03832099959254265, 0.03989892452955246, -0.034488897770643234, 0.017244448885321617, 0.00027648883406072855, 0.02693740837275982, 0.04080059379339218, -0.0028036320582032204, ...
[ "ModelOutput", "ModelVisionEncoderOutput", "None", "attentions", "class", "hidden_states", "image_embeds", "intermediate_embeddings", "last_hidden_state", "r" ]
sam_hq/modeling_sam_hq.py:SamHQMMaskDecoderOutputs
[ -0.0001159395178547129, 0.03126649931073189, 0.019258350133895874, -0.024129578843712807, -0.0009133555577136576, 0.05279053747653961, 0.02480928599834442, -0.05505622550845146, 0.018805211409926414, -0.008949468843638897, 0.010535450652241707, 0.02718825824558735, -0.0036534222308546305, ...
[ "FloatTensor", "ModelMMaskDecoderOutputs", "ModelOutput", "None", "class", "iou_scores", "mask_decoder_attentions", "masks", "r", "torch" ]
sam_hq/modeling_sam_hq.py:SamHQImageSegmentationOutput
[ -0.00006864943861728534, 0.04029765725135803, -0.0008325384114868939, -0.013152708299458027, -0.00041801825864240527, 0.02720092050731182, 0.03537238761782646, -0.029327740892767906, 0.01869363524019718, -0.018581697717308998, 0.028656112030148506, 0.03313362970948219, -0.005037207156419754,...
[ "ModelImageSegmentationOutput", "ModelOutput", "None", "class", "iou_scores", "mask_decoder_attentions", "pred_masks", "r", "vision_attentions", "vision_hidden_states" ]
sam_hq/modeling_sam_hq.py:SamHQVisionAttention
[ -0.00003578346149879508, 0.03932340443134308, 0.037312548607587814, -0.0017804453382268548, -0.00010211377957602963, 0.0022063557989895344, 0.03708912059664726, -0.02602941356599331, 0.002639248501509428, 0.03641883656382561, 0.013293991796672344, 0.03150340914726257, 0.0015151241095736623, ...
[ "F", "Linear", "ModelVisionAttention", "Module", "None", "Parameter", "_", "__init__", "arange", "attention_dropout", "attn_output", "attn_probs", "attn_weights", "batch_size", "bhwc", "bhwk", "class", "config", "decomposed_rel_pos", "def", "dim", "dropout", "dtype", "e...
sam_hq/modeling_sam_hq.py:SamHQMLPBlock
[ -0.00022680613619741052, 0.005571930203586817, 0.04206093028187752, 0.016230033710598946, -0.0008357895421795547, 0.05897673964500427, 0.023087793961167336, -0.04046078771352768, 0.002843113150447607, -0.008515052497386932, 0.022287720814347267, -0.03360302746295929, 0.001857310184277594, ...
[ "ACT2FN", "Linear", "ModelMLPBlock", "Module", "__init__", "act", "class", "config", "def", "forward", "hidden_act", "hidden_size", "hidden_states", "lin1", "lin2", "mlp_dim", "nn", "return", "self", "super" ]
sam_hq/modeling_sam_hq.py:SamHQVisionSdpaAttention
[ -0.00012180607882328331, 0.052045371383428574, 0.031631022691726685, 0.005748546216636896, -0.0003785627777688205, 0.009478090330958366, 0.048680368810892105, -0.025349685922265053, 0.005103587172925472, 0.03320135921239853, 0.008244256488978863, 0.03544469177722931, 0.0006940317689441144, ...
[ "False", "If", "ModelVisionAttention", "ModelVisionSdpaAttention", "None", "The", "True", "_", "__class__", "__init__", "__name__", "attention", "attn_bias", "attn_implementation", "attn_mask", "attn_output", "batch_size", "be", "class", "config", "decomposed_rel_pos", "def...
sam_hq/modeling_sam_hq.py:SamHQVisionLayer
[ -0.00009699767542770132, 0.01536651886999607, 0.013863272964954376, 0.020711395889520645, -0.0001035221794154495, 0.028728710487484932, 0.029619522392749786, -0.03251466527581215, 0.0021852748468518257, 0.030064929276704788, 0.024497348815202713, 0.008407045155763626, 0.0016702738357707858, ...
[ "F", "GradientCheckpointingLayer", "LayerNorm", "ModelMLPBlock", "ModelVisionLayer", "Model_VISION_ATTENTION_CLASSES", "__init__", "_attn_implementation", "attn", "attn_weights", "batch_size", "channel", "class", "config", "contiguous", "def", "eps", "forward", "height", "hidde...
sam_hq/modeling_sam_hq.py:SamHQPositionalEmbedding
[ -0.00032549293246120214, 0.03610801696777344, 0.009374196641147137, 0.01573939248919487, -0.0015912988455966115, 0.010531505569815636, 0.03379340097308159, 0, 0.010415773838758469, 0.02603943645954132, 0.004166309721767902, 0.004282040521502495, 0.002618409926071763, -0.003790184622630477,...
[ "ModelPositionalEmbedding", "Module", "None", "Parameter", "__init__", "cat", "class", "clone", "config", "coordinates", "cos", "def", "dim", "dtype", "forward", "if", "input_coords", "input_shape", "is", "nn", "not", "np", "num_pos_feats", "pi", "positional_embedding...
sam_hq/modeling_sam_hq.py:SamHQPreTrainedModel
[ -0.00021509319776669145, 0.048719048500061035, -0.0017844768008217216, 0.020960520952939987, -0.0009205634705722332, 0.017674818634986877, 0.017561517655849457, -0.00393717922270298, -0.001926101977005601, 0.01586201600730419, -0.0017349080881103873, 0.011160061694681644, -0.0010763511527329...
[ "Model", "ModelConfig", "ModelPositionalEmbedding", "ModelPreTrainedModel", "ModelVisionAttention", "ModelVisionEncoder", "PreTrainedModel", "True", "_init_weights", "_no_split_modules", "_supports_sdpa", "base_model_prefix", "class", "config", "def", "elif", "if", "image", "init...
sam_hq/modeling_sam_hq.py:SamHQPatchEmbeddings
[ -0.0001002231365418993, 0.009902749210596085, 0.018567655235528946, 0.013447484001517296, -0.00009538781159790233, 0, 0.017779937013983727, -0.0124347023665905, 0.008045983500778675, 0.0012659764615818858, 0.009115030989050865, 0.0006646376568824053, -0.003024277277290821, -0.0128848273307...
[ "Conv2d", "Input", "Iterable", "Make", "ModelPatchEmbeddings", "Module", "ValueError", "__init__", "abc", "batch_size", "channel", "class", "collections", "config", "configuration", "def", "dimension", "doesn", "else", "embeddings", "f", "forward", "height", "hidden_siz...
sam_hq/modeling_sam_hq.py:SamHQVisionNeck
[ -0.00006912002572789788, 0.03223370388150215, 0.018145546317100525, 0.007833016104996204, 0, 0.03786896914243698, 0.014257215894758701, -0.04756161943078041, 0.019498011097311974, 0.037418145686388016, 0.013186515308916569, 0.021752115339040756, 0.005860673729330301, -0.012172168120741844,...
[ "Conv2d", "ModelLayerNorm", "ModelVisionNeck", "Module", "__init__", "channels_first", "class", "config", "conv1", "conv2", "data_format", "def", "forward", "hidden_size", "hidden_states", "kernel_size", "layer_norm1", "layer_norm2", "nn", "output_channels", "padding", "per...
sam_hq/modeling_sam_hq.py:SamHQVisionEncoder
[ 0.0000797968459664844, 0.030754229053854942, 0.006987046450376511, 0.039509084075689316, 0.00048404239350929856, 0.028172669932246208, 0.030978713184595108, -0.0252543855458498, 0.007071227766573429, 0.020427992567420006, 0.01234658807516098, 0.023683002218604088, -0.000382323341909796, -0...
[ "False", "ModelPatchEmbeddings", "ModelPreTrainedModel", "ModelVisionAttention", "ModelVisionEncoder", "ModelVisionEncoderOutput", "ModelVisionLayer", "ModelVisionNeck", "ModuleList", "None", "Parameter", "__init__", "_can_record_outputs", "and", "append", "attentions", "check_model_...
sam_hq/modeling_sam_hq.py:SamHQLayerNorm
[ -0.0001214670337503776, 0.036052823066711426, 0.017125090584158897, 0.0576845183968544, -0.000281662680208683, 0.018477071076631546, -0.0048164320178329945, -0.03875678405165672, 0.010590516962110996, 0.047995321452617645, 0.009407533332705498, 0.00543608982115984, 0.003577115945518017, 0....
[ "LayerNorm", "ModelLayerNorm", "NotImplementedError", "Unsupported", "__init__", "channels_first", "channels_last", "class", "data", "data_format", "def", "else", "eps", "f", "features", "format", "forward", "if", "in", "kwargs", "nn", "normalized_shape", "not", "permut...
sam_hq/modeling_sam_hq.py:eager_attention_forward
[ 0.0000415438917116262, 0.02851148322224617, 0.024890977889299393, -0.012558629736304283, 0.00017766642849892378, 0.03733646497130394, 0.06019090861082077, -0.023080725222826004, 0.022628162056207657, 0.012388918548822403, 0.024438414722681046, 0.02919032797217369, 0.00250324048101902, -0.0...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "softmax", "...
sam_hq/modeling_sam_hq.py:SamHQAttention
[ -0.00009485405462328345, 0.038222670555114746, 0.03237685188651085, -0.009218408726155758, 0, 0.022933602333068848, 0.03485008329153061, -0.019448595121502876, 0.001433350145816803, 0.020460370928049088, 0.0028104905504733324, 0.0063798134215176105, -0.0034850083757191896, -0.0042438409291...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelAttention", "Module", "None", "ValueError", "__init__", "_attn_implementation", "_recombine_heads", "_separate_heads", "attention_downModelple_rate", "attention_interface", "attention_mask", "attention_similarity", "attn_output", "a...
sam_hq/modeling_sam_hq.py:SamHQTwoWayAttentionBlock
[ -0.00009303367551183328, 0.028310323134064674, 0.03527555987238884, -0.027636267244815826, -0.0001983548136195168, 0.04358891025185585, 0.021906796842813492, -0.022131482139229774, 0.008875061757862568, 0.016514355316758156, 0.020671028643846512, 0.036623671650886536, -0.00039319891948252916...
[ "False", "LayerNorm", "ModelAttention", "ModelMLPBlock", "ModelTwoWayAttentionBlock", "Module", "_", "__init__", "attention_downModelple_rate", "attention_similarity", "attn_out", "class", "config", "cross_attn_image_to_token", "cross_attn_token_to_image", "def", "downModelple_rate",...
sam_hq/modeling_sam_hq.py:SamHQTwoWayTransformer
[ -0.0002629486261866987, 0.04207178205251694, 0.04229919612407684, -0.018420618027448654, -0.0008670198149047792, 0.04298144206404686, 0.02353745698928833, -0.024901947006583214, 0.009551431983709335, 0.006452901754528284, 0.012280412018299103, 0.020467353984713554, -0.0004797035944648087, ...
[ "LayerNorm", "ModelAttention", "ModelTwoWayAttentionBlock", "ModelTwoWayTransformer", "Module", "ModuleList", "None", "_", "__init__", "append", "attention_similarity", "attn_out", "class", "config", "def", "final_attn_token_to_image", "flatten", "for", "forward", "hidden_size"...
sam_hq/modeling_sam_hq.py:SamHQFeedForward
[ -0.00017931373440660536, 0.038859594613313675, 0.014771191403269768, 0.03386011719703674, -0.00036217825254425406, 0.038405098021030426, 0.047267813235521317, -0.026360895484685898, 0.0034087365493178368, -0.007669657003134489, 0.038859594613313675, -0.03590535745024681, 0.003366127144545316...
[ "F", "False", "Linear", "ModelFeedForward", "Module", "ModuleList", "ReLU", "_", "__init__", "activation", "class", "def", "for", "forward", "hidden_dim", "hidden_states", "if", "in", "input_dim", "layer", "layers", "nn", "num_layers", "output_dim", "proj_in", "proj...
sam_hq/modeling_sam_hq.py:SamHQMaskDecoder
[ -0.0003727467264980078, 0.038169264793395996, 0.015477171167731285, -0.015360801480710506, -0.0015928103821352124, 0.04119487479329109, 0.02967427484691143, -0.07214921712875366, 0.003243805607780814, -0.010415088385343552, 0.01629175804555416, 0.017688196152448654, -0.004887527786195278, ...
[ "Conv2d", "ConvTranspose2d", "Embedding", "GELU", "ModelFeedForward", "ModelLayerNorm", "ModelMaskDecoder", "ModelTwoWayTransformer", "Module", "ModuleList", "None", "True", "_", "__init__", "activation", "and", "attention_similarity", "batch_size", "cat", "channels_first", "...
sam_hq/modeling_sam_hq.py:SamHQVisionModel
[ 0.00008263205381808802, 0.03668159618973732, -0.008607798255980015, 0.04860874265432358, 0.0004448065592441708, 0.021491365507245064, 0.048158660531044006, -0.0168780367821455, 0.010408122092485428, 0.028242580592632294, 0.024641932919621468, 0.005569751840084791, -0.0008474180358462036, -...
[ "ModelPreTrainedModel", "ModelVisionConfig", "ModelVisionEncoder", "ModelVisionModel", "None", "__init__", "auto_docstring", "class", "config", "def", "forward", "get_input_embeddings", "kwargs", "main_input_name", "patch_embed", "pixel_values", "post_init", "return", "self", "...
sam_hq/modeling_sam_hq.py:SamHQMaskEmbedding
[ -0.000023781873096595518, 0.005298953503370285, 0.03021530993282795, 0.0033259389456361532, 0, 0.05344051122665405, -0.009244983084499836, -0.04284260421991348, 0.013360127806663513, 0.006088159512728453, 0.030666284263134003, 0.012965524569153786, 0.0017898061778396368, -0.001359970774501...
[ "ACT2FN", "Conv2d", "ModelLayerNorm", "ModelMaskEmbedding", "Module", "__init__", "activation", "channels_first", "class", "config", "conv1", "conv2", "conv3", "data_format", "def", "dense_embeddings", "eps", "forward", "hidden_act", "hidden_size", "hidden_states", "kernel_...
sam_hq/modeling_sam_hq.py:SamHQPromptEncoder
[ -0.00015911966329440475, 0.01103229634463787, -0.005233268719166517, 0.01855688914656639, -0.0007319504511542618, 0.03688747435808182, 0.022177744656801224, -0.025798602029681206, 0.004186614882200956, 0.0009547179797664285, -0.0028995138127356768, 0.018217433243989944, -0.003238969016820192...
[ "Embedding", "ModelMaskEmbedding", "ModelPositionalEmbedding", "ModelPromptEncoder", "Module", "ModuleList", "None", "Tensor", "__init__", "_embed_boxes", "_embed_points", "also", "batch_size", "be", "box_embeddings", "boxes", "cat", "class", "config", "coords", "corner_embed...
sam_hq/modeling_sam_hq.py:SamHQModel
[ -0.00013262829452287406, 0.03485225513577461, -0.009781117551028728, 0.010961596854031086, -0.0004128165601287037, 0.046544626355171204, 0.021361060440540314, -0.031704310327768326, 0.0036257589235901833, 0.007026664447039366, 0.01832554116845131, 0.01967466063797474, -0.003161999164149165, ...
[ "False", "Got", "ModelImageSegmentationOutput", "ModelMaskDecoder", "ModelModel", "ModelPositionalEmbedding", "ModelPreTrainedModel", "ModelPromptEncoder", "ModelTwoWayAttentionBlock", "ModelVisionEncoder", "None", "Of", "OutputRecorder", "The", "True", "ValueError", "You", "__init...
glm46v/modeling_glm46v.py:Glm46VPreTrainedModel
[ -0.0003988422977272421, 0.03176034614443779, 0.00702844699844718, 0.021526455879211426, -0.001999725354835391, 0.03293665498495102, 0.028349049389362335, -0.03058403544127941, -0.005499245133250952, 0.016233066096901894, 0.01729174330830574, 0.005881545599550009, -0.0057639144361019135, 0....
[ "ModelConfig", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_sdpa", "base_model_prefix", "class", "config", ...
glm46v/modeling_glm46v.py:Glm46VModelOutputWithPast
[ -0.0002648343506734818, 0.008071142248809338, 0.014355102553963661, 0.0052174171432852745, -0.0014196563279256225, 0.042200542986392975, 0.04911866411566734, -0.03851087763905525, 0.016949398443102837, 0.0025078190956264734, 0.008071142248809338, 0.03182335942983627, -0.001830419758334756, ...
[ "ModelModelOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "last_hidden_state", "past_key_values", "r", "rope_deltas" ]
glm46v/modeling_glm46v.py:Glm46VModel
[ -0.00008553066436434165, 0.03997396305203438, -0.02223270945250988, 0.0007088080164976418, -0.00041756511200219393, 0.05344833433628082, 0.03682994470000267, -0.04738486558198929, 0.006652969866991043, 0.035257935523986816, 0.02919446863234043, 0.014316517859697342, 0.0003789666516240686, ...
[ "AutoModel", "False", "Image", "ModelConfig", "ModelModel", "ModelModelOutputWithPast", "ModelPreTrainedModel", "None", "True", "ValueError", "Video", "You", "_", "__init__", "_checkpoint_conversion_mapping", "_no_split_modules", "accepts_loss_kwargs", "add", "all", "and", "a...
glm46v/modeling_glm46v.py:Glm46VCausalLMOutputWithPast
[ -0.0002988479973282665, 0.017503436654806137, 0.020285440608859062, -0.0002644352207425982, -0.0015503872418776155, 0.049844224005937576, 0.049612391740083694, -0.02874736674129963, 0.016923854127526283, 0.0007607040461152792, 0.017155686393380165, 0.02156052552163601, -0.0010577408829703927...
[ "ModelCausalLMOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "logits", "loss", "past_key_values", "r", "rope_deltas" ]
glm46v/modeling_glm46v.py:Glm46VForConditionalGeneration
[ -0.00029316675500012934, 0.03434137627482414, -0.0069649312645196915, 0.02171921357512474, -0.0011015962809324265, 0.037525344640016556, 0.02854200452566147, -0.05526459589600563, -0.00328346760943532, 0.012337878346443176, 0.039344754070043564, 0.0018620530609041452, 0.0008564022718928754, ...
[ "False", "GenerationMixin", "Linear", "ModelCausalLMOutputWithPast", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "None", "Tensor", "True", "__init__", "_checkpoint_conversion_mapping", "_expand_dict_for_generation", "_expand_dict_for_generation_visual", "_expan...
t5/modeling_t5.py:T5LayerNorm
[ -0.0001398334134137258, 0.03849904239177704, 0.044577836990356445, 0.05245776101946831, -0.0005487801972776651, 0.01587241142988205, 0.03872418403625488, -0.027354583144187927, 0.003658534726127982, 0.05245776101946831, 0.008386487141251564, 0.0036303920205682516, 0.00042741536162793636, 0...
[ "Module", "Parameter", "T5LayerNorm", "True", "__init__", "bfloat16", "class", "def", "dtype", "eps", "float16", "float32", "forward", "hidden_size", "hidden_states", "if", "in", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "super", "to", "...
t5/modeling_t5.py:T5DenseActDense
[ -0.00033321554656140506, 0.04218798503279686, 0.03175688907504082, -0.009445936419069767, -0.0011300353799015284, 0.026077738031744957, 0.05238727852702141, -0.0405653715133667, -0.0037088338285684586, -0.006026855204254389, 0.00990954041481018, -0.021209893748164177, -0.00004866938979830593...
[ "ACT2FN", "Dropout", "Linear", "Module", "T5DenseActDense", "Tensor", "__init__", "act", "and", "class", "config", "d_ff", "d_model", "def", "dense_act_fn", "dropout", "dropout_rate", "dtype", "forward", "hidden_states", "if", "int8", "isinstance", "nn", "return", "...
t5/modeling_t5.py:T5DenseGatedActDense
[ -0.00034810524084605277, 0.029160648584365845, 0.016446607187390327, -0.002886904403567314, -0.0013486800016835332, 0.026827797293663025, 0.05132274329662323, -0.04502404108643532, -0.0013559701619669795, -0.003674241714179516, 0.01901274360716343, -0.01901274360716343, -0.000634244119282811...
[ "ACT2FN", "Dropout", "Linear", "Module", "T5DenseGatedActDense", "Tensor", "__init__", "act", "and", "class", "config", "d_ff", "d_model", "def", "dense_act_fn", "dropout", "dropout_rate", "dtype", "forward", "hidden_gelu", "hidden_linear", "hidden_states", "if", "int8"...
t5/modeling_t5.py:T5LayerFF
[ -0.00017676934658084065, 0.03183624893426895, 0.023536084219813347, 0.023308683186769485, -0.0005685044452548027, 0.028766324743628502, 0.04184192791581154, -0.04229672998189926, 0.001335985492914915, -0.006765203084796667, 0.017509937286376953, -0.030471839010715485, 0.00151364307384938, ...
[ "DenseReluDense", "Dropout", "Module", "T5DenseActDense", "T5DenseGatedActDense", "T5LayerFF", "T5LayerNorm", "__init__", "class", "config", "d_model", "def", "dropout", "dropout_rate", "else", "eps", "forward", "forwarded_states", "hidden_states", "if", "is_gated_act", "la...
t5/modeling_t5.py:T5Attention
[ -0.0002360922226216644, 0.025107432156801224, 0.037490732967853546, -0.02135835774242878, -0.001107680844143033, 0.010111137293279171, 0.05225981026887894, -0.03590022027492523, 0.0038626817986369133, 0.01897258311510086, -0.001640219590626657, 0.02499382384121418, 0.0003017720009665936, 0...
[ "Embedding", "EncoderDecoderCache", "False", "Instantiating", "Linear", "Module", "None", "Please", "T5Attention", "True", "__class__", "__init__", "__name__", "_relative_position_bucket", "a", "abs", "and", "arange", "attn_output", "attn_weights", "batch_size", "bidirectio...
t5/modeling_t5.py:T5LayerSelfAttention
[ 0.0000738053786335513, 0.04226034879684448, 0.035105157643556595, 0.0024456221144646406, 0.00018080134759657085, 0.02057117410004139, 0.045390743762254715, -0.0262729674577713, 0.007434690836817026, 0.011124086566269398, 0.011291786096990108, 0.022248173132538795, 0.0007022428908385336, -0...
[ "Dropout", "False", "Module", "None", "SelfAttention", "T5Attention", "T5LayerNorm", "T5LayerSelfAttention", "__init__", "attention_mask", "attention_output", "cache_position", "class", "config", "d_model", "def", "dropout", "dropout_rate", "eps", "forward", "has_relative_att...
t5/modeling_t5.py:T5LayerCrossAttention
[ -0.00013990365550853312, 0.03468906879425049, 0.04865479841828346, 0.00641972990706563, -0.0007355940761044621, 0.0280440840870142, 0.05248410999774933, -0.03446381539106369, 0.002801592694595456, 0.002435555448755622, 0.00639157323166728, 0.01903393678367138, -0.0013656005030497909, -0.00...
[ "Dropout", "EncDecAttention", "False", "Module", "None", "T5Attention", "T5LayerCrossAttention", "T5LayerNorm", "__init__", "attention_mask", "attention_output", "cache_position", "class", "config", "d_model", "def", "dropout", "dropout_rate", "eps", "forward", "has_relative_...
t5/modeling_t5.py:T5Block
[ -0.00018783639825414866, 0.02494608610868454, 0.0329604372382164, -0.011965090408921242, -0.0009030257351696491, 0.01580294966697693, 0.035895273089408875, -0.03228316828608513, 0.0018907100893557072, 0.010046160779893398, -0.003950737416744232, 0.009312452748417854, 0.0035838831681758165, ...
[ "False", "GradientCheckpointingLayer", "ModuleList", "None", "T5Block", "T5LayerFF", "T5LayerSelfAttention", "True", "__init__", "and", "any", "append", "attention_mask", "attention_outputs", "cache_position", "clamp", "clamp_value", "class", "config", "cross_attention_outputs"...
t5/modeling_t5.py:T5ClassificationHead
[ -0.0002842158137355, 0.03248997777700424, 0.03340518847107887, 0.010067316703498363, -0.001215514144860208, 0.014128563925623894, 0.06314953416585922, -0.01144013274461031, -0.0013156152563169599, 0.0012369643663987517, 0.025168292224407196, 0.006492275279015303, -0.00038431695429608226, -...
[ "Dropout", "Linear", "Module", "T5ClassificationHead", "__init__", "class", "classifier_dropout", "config", "d_model", "def", "dense", "dropout", "forward", "hidden_states", "nn", "num_labels", "out_proj", "p", "return", "self", "super", "tanh", "torch" ]
t5/modeling_t5.py:T5PreTrainedModel
[ -0.00017662391474004835, 0.050164707005023956, -0.0032477709464728832, -0.0024323132820427418, -0.0007029807311482728, 0.019458506256341934, 0.051289476454257965, -0.035992614924907684, -0.011303930543363094, 0.014959430322051048, 0.0006397124961949885, -0.017433922737836838, -0.003402426838...
[ "DUMMY_INPUTS", "DUMMY_MASK", "In", "None", "PreTrainedModel", "See", "T5", "T5Attention", "T5Block", "T5ClassificationHead", "T5Config", "T5DenseActDense", "T5DenseGatedActDense", "T5EncoderModel", "T5ForConditionalGeneration", "T5ForQuestionAnswering", "T5ForTokenClassification", ...
t5/modeling_t5.py:T5Stack
[ -0.00017732214473653585, 0.02721409872174263, 0.0061542256735265255, -0.0019196666544303298, -0.0009457181440666318, 0.02518150955438614, 0.055105727165937424, -0.026875333860516548, -0.0006669430295005441, -0.004093406721949577, -0.01524441223591566, -0.010332323610782623, -0.00337353185750...
[ "BaseModelOutputWithPastAndCrossAttentions", "Dropout", "DynamicCache", "Embedding", "EncoderDecoderCache", "False", "ModuleList", "None", "Setting", "T5Block", "T5LayerNorm", "T5PreTrainedModel", "T5Stack", "True", "ValueError", "You", "__init__", "all_attentions", "all_cross_at...
t5/modeling_t5.py:T5Model
[ -0.000053482999646803364, 0.049848772585392, 0.01933595910668373, -0.005113411229103804, -0.00018337028450332582, 0.02101248688995838, 0.04716632887721062, -0.03353056684136391, 0.0010757723357528448, 0.006678170990198851, 0.0023331684060394764, -0.00656640250235796, -0.004135436378419399, ...
[ "BaseModelOutput", "Embedding", "EncDecAttention", "False", "None", "Seq2SeqModelOutput", "T5Model", "T5PreTrainedModel", "T5Stack", "True", "__init__", "_keys_to_ignore_on_load_unexpected", "_tied_weights_keys", "and", "attention_mask", "attentions", "auto_docstring", "block", "...
t5/modeling_t5.py:T5ForConditionalGeneration
[ -0.00024269719142466784, 0.044897209852933884, 0.015192490071058273, 0.005838904529809952, -0.0008467828738503158, 0.027210429310798645, 0.04693799093365669, -0.020181069150567055, -0.007993063889443874, -0.0018707170384004712, 0.01303833071142435, -0.017006518319249153, -0.00510195549577474...
[ "BaseModelOutput", "CrossEntropyLoss", "Embedding", "EncDecAttention", "False", "GenerationMixin", "Linear", "None", "Seq2SeqLMOutput", "T5ForConditionalGeneration", "T5PreTrainedModel", "T5Stack", "True", "__init__", "_keys_to_ignore_on_load_unexpected", "_shift_right", "_tied_weigh...
t5/modeling_t5.py:T5EncoderModel
[ -0.000042719810153357685, 0.047110091894865036, 0.02534971572458744, 0.01738586835563183, -0.00045567797496914864, 0.0252375490963459, 0.0403800792992115, -0.027929555624723434, -0.0019208579324185848, 0.0028742763679474592, -0.00454275868833065, 0.0004241310525685549, -0.00409409124404192, ...
[ "Embedding", "False", "None", "T5EncoderModel", "T5PreTrainedModel", "T5Stack", "__init__", "_keys_to_ignore_on_load_unexpected", "_tied_weights_keys", "attention_mask", "auto_docstring", "class", "config", "d_model", "decoder", "def", "else", "embed_tokens", "encoder", "encode...
t5/modeling_t5.py:T5ForSequenceClassification
[ -0.00028502821805886924, 0.040109310299158096, 0.029912028461694717, 0.027192752808332443, -0.000984320999123156, 0.01960144378244877, 0.031951483339071274, 0.005013663787394762, -0.006401627324521542, -0.00484370905905962, 0.030365241691470146, -0.0026059721130877733, 0.0017420357326045632,...
[ "All", "BCEWithLogitsLoss", "CrossEntropyLoss", "EncDecAttention", "False", "If", "MSELoss", "None", "NotImplementedError", "Passing", "Please", "Seq2SeqSequenceClassifierOutput", "T5ClassificationHead", "T5ForSequenceClassification", "T5Model", "T5PreTrainedModel", "ValueError", "...
t5/modeling_t5.py:T5ForTokenClassification
[ -0.0002485894365236163, 0.03545595705509186, 0.010568602941930294, -0.02147812768816948, -0.0009659475763328373, 0.0363650843501091, 0.06318433582782745, 0.014091470278799534, -0.004744507372379303, 0.009489014744758606, 0.041365284472703934, 0.014318752102553844, -0.0006356787052936852, 0...
[ "CrossEntropyLoss", "Dropout", "Linear", "None", "T5EncoderModel", "T5ForTokenClassification", "T5PreTrainedModel", "TokenClassifierOutput", "__init__", "attention_mask", "attentions", "auto_docstring", "class", "classifier", "classifier_dropout", "config", "def", "dropout", "els...
t5/modeling_t5.py:T5ForQuestionAnswering
[ -0.00014214482507668436, 0.03908456116914749, 0.0053348178043961525, 0.008086460642516613, -0.0005580500583164394, 0.043127790093421936, 0.056605227291584015, -0.0021479660645127296, -0.0008774371817708015, 0.03616445139050484, -0.005475207697600126, 0.0065140933729708195, -0.001137158600613...
[ "BaseModelOutput", "CrossEntropyLoss", "Embedding", "EncDecAttention", "False", "If", "Linear", "None", "Please", "Seq2SeqQuestionAnsweringModelOutput", "T5ForQuestionAnswering", "T5PreTrainedModel", "T5Stack", "True", "ValueError", "__init__", "_keys_to_ignore_on_load_unexpected", ...
openai/modeling_openai.py:Attention
[ -0.00017864904657471925, 0.03595023229718208, 0.02309460937976837, -0.020477980375289917, -0.0010096781188622117, 0.03981829434633255, 0.022867077961564064, -0.030944501981139183, 0.004038712475448847, 0.024346042424440384, 0.014505235478281975, 0.012514321133494377, 0.003981829155236483, ...
[ "Conv1D", "Dropout", "False", "Model", "Model_mask", "Module", "None", "True", "ValueError", "__init__", "_attn", "a", "attn_dropout", "attn_outputs", "attn_pdrop", "b", "bias", "c_attn", "c_proj", "class", "config", "contiguous", "def", "dim", "else", "f", "forwa...
openai/modeling_openai.py:MLP
[ -0.00009012820373754948, 0.03792166709899902, 0.025357257574796677, -0.0017633003881201148, -0.00028912414563819766, 0.04134832322597504, 0.022501710802316666, -0.02649947814643383, 0.009937304072082043, -0.006110870745033026, 0.02924080193042755, -0.029469246044754982, 0.0022987155243754387...
[ "ACT_FNS", "Conv1D", "Dropout", "Model", "Module", "__init__", "act", "afn", "c_fc", "c_proj", "class", "config", "def", "dropout", "forward", "h", "h2", "n_embd", "n_state", "nn", "nx", "resid_pdrop", "return", "self", "super", "x" ]
openai/modeling_openai.py:Block
[ -0.00019122152298223227, 0.029966378584504128, 0.022074470296502113, 0.023561351001262665, -0.0008184995967894793, 0.062220267951488495, 0.04758020490407944, -0.010465357452630997, 0.008349411189556122, 0.012981617823243141, 0.023218223825097084, 0.019786959514021873, 0.0018157109152525663, ...
[ "Attention", "False", "LayerNorm", "MLP", "Model", "Module", "None", "__init__", "a", "attention_mask", "attn", "attn_outputs", "class", "config", "def", "eps", "forward", "h", "layer_norm_epsilon", "ln_1", "ln_2", "m", "mlp", "n", "n_embd", "n_positions", "nn", ...
openai/modeling_openai.py:OpenAIGPTSequenceSummary
[ -0.00036174713750369847, 0.029434220865368843, 0.018614526838064194, 0.04141732305288315, -0.0015487867640331388, 0.01768380030989647, 0.02059232071042061, -0.0029666901100426912, -0.004304609261453152, 0.003213914344087243, 0.028154471889138222, 0.020359639078378677, 0.0014106320450082421, ...
[ "Dropout", "Identity", "Linear", "ModelGPTSequenceSummary", "Module", "None", "NotImplementedError", "__init__", "activation", "activation_string", "and", "attn", "class", "cls_index", "config", "def", "dim", "dtype", "elif", "else", "expand", "first", "first_dropout", ...
openai/modeling_openai.py:OpenAIGPTPreTrainedModel
[ -0.00019242832786403596, 0.03904610127210617, -0.0014401087537407875, -0.012939696200191975, -0.0010641197441145778, 0.029965613037347794, 0.04267829656600952, -0.014074757695198059, 0.009761525318026543, 0, 0.012428918853402138, 0.01475579384714365, 0.0014472028706222773, 0.00322073581628...
[ "Attention", "ModelGPTConfig", "ModelGPTModel", "ModelGPTPreTrainedModel", "PreTrainedModel", "_init_weights", "arange", "base_model_prefix", "bias", "class", "config", "copy_", "def", "elif", "if", "init", "isinstance", "module", "n_positions", "ones", "position_ids", "sel...
openai/modeling_openai.py:OpenAIGPTDoubleHeadsModelOutput
[ -0.00019254995277151465, 0.0170366782695055, 0.03997940570116043, -0.013913286849856377, -0.0008234394481405616, 0.07814156264066696, 0.038616470992565155, 0.0006317768129520118, 0.012720719911158085, -0.008631916716694832, 0.028735198080539703, 0.01987612433731556, -0.0025696989614516497, ...
[ "ModelGPTDoubleHeadsModelOutput", "ModelOutput", "None", "attentions", "class", "hidden_states", "logits", "loss", "mc_logits", "mc_loss", "r" ]
openai/modeling_openai.py:OpenAIGPTModel
[ -0.00007831506809452549, 0.019712647423148155, -0.02105669118463993, -0.009128299541771412, -0.0004760156152769923, 0.022960754111409187, 0.049505624920129776, 0.0014560477575287223, 0.001792058814316988, 0.0011060363613069057, 0.023072756826877594, 0.008400275371968746, -0.00226807454600930...
[ "BaseModelOutput", "Block", "Dropout", "Embedding", "False", "ModelGPTModel", "ModelGPTPreTrainedModel", "ModuleList", "None", "True", "_", "__init__", "all_attentions", "all_hidden_states", "and", "arange", "attention_mask", "attentions", "auto_docstring", "block", "class", ...
openai/modeling_openai.py:OpenAIGPTLMHeadModel
[ -0.00019750179490074515, 0.037468913942575455, 0.01168082095682621, -0.009254369884729385, -0.0009451871737837791, 0.02302306704223156, 0.053043339401483536, -0.006461130455136299, 0.0019891252741217613, 0.012019394896924496, 0.024490224197506905, 0.0026239524595439434, -0.001516531687229871...
[ "CausalLMOutput", "GenerationMixin", "Linear", "ModelGPTLMHeadModel", "ModelGPTModel", "ModelGPTPreTrainedModel", "None", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "class", "config", "def", "else", "for", "forward", "hidden_states", "if...
openai/modeling_openai.py:OpenAIGPTDoubleHeadsModel
[ -0.00019916833844035864, 0.033242687582969666, 0.012802926823496819, -0.014487522654235363, -0.0007054244051687419, 0.05008864402770996, 0.042676422744989395, 0.0014389255084097385, 0, 0.008703744038939476, 0.03256884962320328, 0.01796901971101761, -0.0016494998708367348, -0.00550301233306...
[ "CrossEntropyLoss", "Linear", "ModelGPTDoubleHeadsModel", "ModelGPTDoubleHeadsModelOutput", "ModelGPTModel", "ModelGPTPreTrainedModel", "ModelGPTSequenceSummary", "None", "__init__", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "class", "config", "contiguous"...
openai/modeling_openai.py:OpenAIGPTForSequenceClassification
[ -0.00031006871722638607, 0.04014238342642784, -0.0019986145198345184, 0.011906638741493225, -0.0009496962302364409, 0.01287051010876894, 0.025400830432772636, 0.013777682557702065, -0.0033310239668935537, -0.00549973314628005, 0.0421835221350193, 0.018823830410838127, 0.00009346357546746731,...
[ "BCEWithLogitsLoss", "Cannot", "CrossEntropyLoss", "Linear", "MSELoss", "ModelGPTForSequenceClassification", "ModelGPTModel", "ModelGPTPreTrainedModel", "None", "Results", "SequenceClassifierOutput", "ValueError", "__class__", "__init__", "__name__", "and", "arange", "argmax", "a...
falcon_h1/modeling_falcon_h1.py:FalconHybridMambaAttentionDynamicCache
[ -0.00014835517504252493, -0.007855759933590889, 0.01966765709221363, -0.03187517076730728, -0.00042210580431856215, 0.02498018555343151, -0.020006755366921425, -0.05335134640336037, 0.0017449394799768925, 0.024641089141368866, 0.007912276312708855, 0.016050618141889572, 0.002416070085018873,...
[ "Any", "False", "ModelHybridMambaAttentionDynamicCache", "None", "__getitem__", "__init__", "__len__", "append", "batch_size", "beam_idx", "cache_kwargs", "cache_position", "cat", "clamp", "class", "config", "conv_kernel_size", "conv_state", "conv_states", "def", "device", ...
falcon_h1/modeling_falcon_h1.py:FalconH1RotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
falcon_h1/modeling_falcon_h1.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
falcon_h1/modeling_falcon_h1.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
falcon_h1/modeling_falcon_h1.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
falcon_h1/modeling_falcon_h1.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
falcon_h1/modeling_falcon_h1.py:FalconH1Attention
[ -0.00010304809984518215, 0.03224260360002518, 0.03111524134874344, -0.008511596359312534, -0.0004896986647509038, 0.033369969576597214, 0.03990867733955383, -0.008962542749941349, 0.0016064934898167849, 0.013810207135975361, 0.012513738125562668, 0.026154840365052223, -0.0009512132382951677,...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache_position", "cla...
falcon_h1/modeling_falcon_h1.py:FalconH1RMSNormGated
[ -0.00013441299961414188, 0.005953338462859392, -0.012761223129928112, 0.04739883169531822, -0.00047712164814583957, 0.03327033296227455, 0.01452728547155857, -0.03873942792415619, 0.010995161719620228, 0.02369941584765911, 0.03030790574848652, -0.018458198755979538, 0.0028769723139703274, ...
[ "F", "ModelRMSNormGated", "Module", "None", "Parameter", "True", "__init__", "and", "batch_size", "class", "def", "dim", "else", "eps", "float32", "forward", "gate", "hidden_size", "hidden_states", "if", "int", "is", "keepdim", "len", "mean", "n_groups", "nn", "...
falcon_h1/modeling_falcon_h1.py:pad_tensor_by_size
[ -0.000021551304598688148, 0.01097853947430849, 0.013430598191916943, -0.0059072342701256275, 0.00009230053547071293, 0.023963307961821556, 0.0064645204693078995, -0.05394531041383743, 0.006074420176446438, 0.019616475328803062, -0.012538940645754337, -0.05060159042477608, 0.00058166752569377...
[ "Model", "Model_shape", "Model_size", "Model_tensor_by_size", "constant", "def", "else", "functional", "if", "input_tensor", "len", "mode", "nn", "return", "shape", "torch", "value" ]
falcon_h1/modeling_falcon_h1.py:reshape_into_chunks
[ -0.00011610210640355945, -0.015866706147789955, 0.012905665673315525, -0.01273806020617485, 0.00019728628103621304, 0.014414120465517044, 0.00466503482311964, -0.048494018614292145, 0.006508701480925083, 0.0032683180179446936, -0.010503312572836876, -0.02704044245183468, 0.001829699380323290...
[ "Model", "Model_into_chunks", "chunk_size", "def", "else", "if", "input_tensor", "len", "pad_size", "pad_tensor_by_size", "return", "shape" ]
falcon_h1/modeling_falcon_h1.py:segment_sum
[ 0.0000940322206588462, 0.008414141833782196, 0.02875296212732792, 0.0057394481264054775, 0.0003012513625435531, 0.02875296212732792, 0.0039005957078188658, -0.02696983329951763, 0.01281624287366867, 0.013874975964426994, 0.022177673876285553, -0.0011632133973762393, 0.0012050054501742125, ...
[ "Model_sum", "None", "bool", "chunk_size", "cumsum", "def", "device", "diagonal", "dim", "dtype", "expand", "inf", "input_tensor", "mask", "masked_fill", "ones", "return", "size", "tensor_segsum", "torch", "tril" ]
falcon_h1/modeling_falcon_h1.py:apply_mask_to_padding_states
[ 0.00005794858589069918, -0.0029697560239583254, 0.05510082468390465, -0.006776063237339258, 0.00009846902685239911, -0.00038864766247570515, -0.003457744140177965, -0.06960104405879974, 0.019296443089842796, 0.0010666025336831808, 0.03480052202939987, 0.028777355328202248, 0.0040712147019803...
[ "Model_mask_to_padding_states", "None", "and", "attention_mask", "def", "hidden_states", "if", "is", "not", "return", "shape" ]
falcon_h1/modeling_falcon_h1.py:FalconH1Mixer
[ -0.0001763056352501735, 0.021199416369199753, 0.01310716662555933, 0.0017452477477490902, -0.0004665866435971111, 0.05767153203487396, 0.021883269771933556, -0.0337367057800293, 0.00900405365973711, 0.009175016544759274, 0.015272698365151882, -0.011283560656011105, -0.0005164508474990726, ...
[ "A", "ACT2FN", "A_cumsum", "A_log", "B", "B_decay", "C", "C_reshaped", "C_times_states", "Conv1d", "D", "D_residual", "F", "Falling", "False", "G", "G_intermediate", "L", "Linear", "M", "M_intermediate", "ModelMixer", "ModelRMSNormGated", "Module", "None", "Paramete...
falcon_h1/modeling_falcon_h1.py:FalconH1MLP
[ -0.00025571382138878107, 0.017633449286222458, 0.027775565162301064, 0.026046793907880783, -0.0009508232469670475, 0.06269671022891998, 0.03273136913776398, -0.008759099058806896, -0.0013397964648902416, -0.008355719968676567, 0.025701040402054787, -0.04540901258587837, -0.001195732271298766...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_multiplier", "down_proj", "forward", "gate_multiplier", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "mlp_multipliers", "nn", "return", "self", "super", "up_p...
falcon_h1/modeling_falcon_h1.py:FalconH1RMSNorm
[ -0.00009884802420856431, 0.04179859161376953, 0.03230918198823929, 0.053095508366823196, -0.00046952810953371227, 0.03931327164173126, 0.022028988227248192, -0.02937198430299759, 0.007964326068758965, 0.042476408183574677, 0.019995542243123055, 0.006975846365094185, 0.0028524715453386307, ...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
falcon_h1/modeling_falcon_h1.py:FalconH1DecoderLayer
[ -0.00015264080138877034, 0.03613969683647156, 0.012818298302590847, -0.006550319958478212, -0.0005011559696868062, 0.049917954951524734, 0.02913763001561165, -0.04381938278675079, 0.008526709862053394, -0.002752828411757946, 0.007284407503902912, 0.01806984841823578, -0.0003652791492640972, ...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelMixer", "ModelRMSNorm", "None", "Tensor", "__init__", "attention_hidden_states", "attention_in_multiplier", "attention_mask", "attention_out_multiplier", "attn_out_multiplier", "cache_params...
falcon_h1/modeling_falcon_h1.py:compute_mup_vector
[ -0.00037301628617569804, 0.026318572461605072, 0.02235914207994938, 0.008151770569384098, -0.00112814677413553, 0.05752820894122124, 0.030510911718010902, -0.0400601290166378, -0.007511273957788944, 0.04960934445261955, 0.03679941967129707, -0.03936140611767769, -0.0003056913847103715, 0.0...
[ "Model_mup_vector", "None", "config", "def", "else", "groups_time_state_size", "hidden_size", "if", "int", "intermediate_size", "is", "mamba_d_ssm", "mamba_d_state", "mamba_expand", "mamba_n_groups", "mamba_n_heads", "mup_vector", "not", "num_heads", "ones", "return", "ssm_...
falcon_h1/modeling_falcon_h1.py:FalconH1PreTrainedModel
[ -0.00038758208393119276, 0.03477371856570244, 0.01060598436743021, 0.0005034944624640048, -0.00149961665738374, 0.03778744116425514, 0.023182479664683342, -0.019125545397400856, -0.005592773202806711, 0.017039122059941292, -0.0009924998739734292, -0.01970510743558407, -0.002434160327538848, ...
[ "A_log", "D", "ModelConfig", "ModelDecoderLayer", "ModelMixer", "ModelModel", "ModelPreTrainedModel", "PreTrainedModel", "True", "_init_weights", "_is_stateful", "_no_split_modules", "_skip_keys_device_placement", "_supports_flash_attn", "_supports_sdpa", "arange", "base_model_prefix...
falcon_h1/modeling_falcon_h1.py:FalconH1Model
[ -0.0001186673398478888, 0.03510443493723869, 0.004781854338943958, -0.009507451206445694, -0.0005063139833509922, 0.05468190833926201, 0.016764618456363678, -0.016314562410116196, 0.006188281811773777, -0.0008403405663557351, 0.01597701944410801, 0.002348734298720956, 0.000587183574680239, ...
[ "AttentionMaskConverter", "BaseModelOutputWithPast", "Embedding", "False", "Model", "ModelDecoderLayer", "ModelHybridMambaAttentionDynamicCache", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "Setting", "Tensor", "True", "ValueErr...
falcon_h1/modeling_falcon_h1.py:FalconH1ForCausalLM
[ -0.0002920935512520373, 0.025761226192116737, 0.007352208718657494, -0.004844478331506252, -0.0009973926935344934, 0.045139141380786896, 0.021543681621551514, -0.01225368119776249, 0.002350996946915984, 0.013849508948624134, 0.027129080146551132, -0.006269325036555529, 0.0022227608133107424,...
[ "CausalLMOutputWithPast", "False", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelHybridMambaAttentionDynamicCache", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring...
video_llama_3/modeling_video_llama_3.py:VideoLlama3VisionRotaryEmbedding
[ -0.0003740677493624389, 0.03012978844344616, 0.0015400643460452557, 0.005780715495347977, -0.0017444330733269453, 0.04461077228188515, 0.05488760024309158, -0.02358998917043209, 0.0043501341715455055, 0.03083048202097416, -0.001759030856192112, 0.006452212575823069, -0.0022772515658289194, ...
[ "False", "ModelVisionRotaryEmbedding", "Module", "Tensor", "__init__", "append", "arange", "cat", "class", "cos", "def", "device", "dim", "dtype", "emb", "expand", "flatten", "float", "for", "forward", "grid_thw", "h", "hpos_ids", "in", "inv_freq", "max", "max_gri...
video_llama_3/modeling_video_llama_3.py:VideoLlama3VisionEmbeddings
[ 0.000021579169697361067, 0.0048232185654342175, 0.01772252283990383, 0.03050966002047062, 0.0001840254117269069, 0.01043161191046238, 0.020526720210909843, -0.016825180500745773, 0.014693991281092167, 0.006702030077576637, 0.029163645580410957, 0.006589862518012524, 0.000574860314372927, -...
[ "Conv2d", "ModelVisionEmbeddings", "Module", "__init__", "class", "config", "def", "embed_dim", "embeddings", "forward", "hidden_size", "hidden_states", "in_channels", "kernel_size", "nn", "num_channels", "out_channels", "padding", "patch_embedding", "patch_embeds", "patch_si...
video_llama_3/modeling_video_llama_3.py:VideoLlama3VisionMLP
[ -0.0001662031572777778, 0.03708730265498161, 0.032536715269088745, 0.034129418432712555, -0.00047283468302339315, 0.04709859937429428, 0.03162659704685211, -0.03481200709939003, 0.003341838950291276, 0, 0.049373894929885864, -0.020136358216404915, 0.0019482210045680404, -0.0018913387320935...
[ "ACT2FN", "Linear", "ModelVisionMLP", "Module", "__init__", "activation_fn", "class", "config", "def", "fc1", "fc2", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
video_llama_3/modeling_video_llama_3.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
video_llama_3/modeling_video_llama_3.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
video_llama_3/modeling_video_llama_3.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
video_llama_3/modeling_video_llama_3.py:apply_rotary_pos_emb_vision
[ -0.00006463831959990785, 0.03513491153717041, 0.018587501719594002, -0.0022809358779340982, -0.0004639792023226619, 0.01700076460838318, 0.060749396681785583, -0.009180412627756596, 0.016887426376342773, 0.04692211002111435, 0.009010405279695988, 0.01983422413468361, 0.0013458938337862492, ...
[ "Model_rotary_pos_emb_vision", "cos", "def", "dtype", "float", "k", "k_embed", "orig_q_dtype", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze" ]
video_llama_3/modeling_video_llama_3.py:VideoLlama3VisionAttention
[ -0.00013496835890691727, 0.037487681955099106, 0.018743840977549553, 0.022357111796736717, -0.0005222305189818144, 0.019760074093937874, 0.036810193210840225, -0.019195500761270523, 0.0014749483671039343, 0.029583653435111046, 0.027551189064979553, 0.028454506769776344, -0.000553987803868949...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelVisionAttention", "Module", "None", "Tensor", "ValueError", "__init__", "_attn_implementation", "and", "append", "apply_rotary_pos_emb_vision", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_out...
video_llama_3/modeling_video_llama_3.py:VideoLlama3VisionEncoderLayer
[ -0.0000983678619377315, 0.031028034165501595, 0.018549367785453796, 0.03012867085635662, 0, 0.04451848566532135, 0.029004467651247978, -0.016413381323218346, 0.009443314746022224, 0.030803194269537926, 0.020460516214370728, 0.029454149305820465, 0.0008396399789489806, -0.017762426286935806...
[ "GradientCheckpointingLayer", "LayerNorm", "ModelVisionAttention", "ModelVisionEncoderLayer", "ModelVisionMLP", "Tensor", "_", "__init__", "auto_docstring", "class", "config", "cu_seqlens", "def", "embed_dim", "eps", "forward", "hidden_size", "hidden_states", "kwargs", "layer_n...
video_llama_3/modeling_video_llama_3.py:VideoLlama3VisionEncoder
[ -0.00012589206744451076, 0.028397025540471077, -0.002803079318255186, 0.04710300266742706, -0.00029404161614365876, 0.03786269947886467, 0.017353737726807594, -0.030650757253170013, 0.010592540726065636, 0.029073145240545273, 0.023889560252428055, 0.019269410520792007, 0.0011339089833199978,...
[ "BaseModelOutput", "False", "ModelVisionEncoder", "ModelVisionEncoderLayer", "Module", "ModuleList", "Tensor", "_", "__init__", "auto_docstring", "can_return_tuple", "class", "config", "cu_seqlens", "def", "encoder_layer", "for", "forward", "gradient_checkpointing", "hidden_sta...
video_llama_3/modeling_video_llama_3.py:VideoLlama3PreTrainedModel
[ -0.0003172265423927456, 0.045245565474033356, 0.00652580289170146, 0.016938084736466408, -0.0016604543197900057, 0.03619645535945892, 0.027727412059903145, -0.013109613209962845, -0.003770463867112994, 0.025291111320257187, 0.00643879221752286, -0.007250891998410225, -0.004118506796658039, ...
[ "Model", "ModelConfig", "ModelPreTrainedModel", "ModelVisionEncoderLayer", "ModelVisionRotaryEmbedding", "PreTrainedModel", "True", "_can_compile_fullgraph", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_suppor...
video_llama_3/modeling_video_llama_3.py:VideoLlama3VisionModel
[ -0.00015470758080482483, 0.04542284458875656, -0.010796316899359226, 0.036025017499923706, -0.00030417085508815944, 0.023494575172662735, 0.034682467579841614, -0.04654163494706154, 0.0026151700876653194, 0.03378743678331375, 0.016893718391656876, 0.002824943047016859, -0.0012166834203526378...
[ "BaseModelOutput", "False", "LayerNorm", "ModelPreTrainedModel", "ModelVisionAttention", "ModelVisionConfig", "ModelVisionEmbeddings", "ModelVisionEncoder", "ModelVisionEncoderLayer", "ModelVisionModel", "ModelVisionRotaryEmbedding", "__init__", "_can_record_outputs", "append", "attentio...
video_llama_3/modeling_video_llama_3.py:VideoLlama3Projector
[ -0.00029182113939896226, 0.0368947871029377, 0.018218232318758965, 0.04262378811836243, -0.0009954144479706883, 0.03185326233506203, 0.036665625870227814, -0.03964470699429512, -0.002334569115191698, 0.008421636186540127, 0.01718701235949993, 0.006244614254683256, -0.002205666620284319, -0...
[ "GELU", "Linear", "ModelProjector", "Module", "Sequential", "__init__", "class", "config", "def", "forward", "hidden_size", "hidden_states", "in_hidden_size", "nn", "out_hidden_size", "readout", "return", "self", "super", "text_config", "vision_config" ]
video_llama_3/modeling_video_llama_3.py:VideoLlama3ModelOutputWithPast
[ -0.00016542868979740888, 0.009961297735571861, 0.020833343267440796, 0.02037796936929226, -0.0009463232709094882, 0.03210384026169777, 0.039845190942287445, -0.04417124018073082, 0.01867031864821911, -0.002959928475320339, 0.015255016274750233, 0.026753200218081474, -0.0019637986551970243, ...
[ "ModelModelOutputWithPast", "ModelOutput", "Model_hidden_states", "None", "attentions", "class", "hidden_states", "image_hidden_states", "last_hidden_state", "past_key_values", "r" ]
video_llama_3/modeling_video_llama_3.py:VideoLlama3Model
[ -0.000023616006728843786, 0.0394502654671669, -0.008023783564567566, 0.014598827809095383, 0, 0.04569098725914955, 0.013595854863524437, -0.03722143918275833, 0.0007139216759242117, 0.018499277532100677, 0.03298666328191757, 0.013372971676290035, 0.0007034740410745144, 0.025520088151097298...
[ "AutoModel", "False", "Image", "Model", "ModelModel", "ModelModelOutputWithPast", "ModelPreTrainedModel", "ModelProjector", "Model_compression_mask", "Model_embeds", "Model_features", "Model_grid_thw", "Model_hidden_states", "Model_mask", "Model_merge_sizes", "Model_token_id", "None"...
video_llama_3/modeling_video_llama_3.py:VideoLlama3CausalLMOutputWithPast
[ -0.00018022273434326053, 0.023753713816404343, 0.02855013683438301, 0.013018862344324589, -0.001270481152459979, 0.03677257522940636, 0.03654417395591736, -0.032661356031894684, 0.01838628761470318, -0.0034688415471464396, 0.023753713816404343, 0.014846071600914001, -0.00031226713326759636, ...
[ "ModelCausalLMOutputWithPast", "ModelOutput", "Model_hidden_states", "None", "attentions", "class", "hidden_states", "image_hidden_states", "logits", "loss", "past_key_values", "r" ]
video_llama_3/modeling_video_llama_3.py:VideoLlama3ForConditionalGeneration
[ -0.00023132354544941336, 0.028479283675551414, -0.0023308938834816217, 0.028931336477398872, -0.0008334711310453713, 0.03910250961780548, 0.01966426707804203, -0.040684692561626434, -0.006385236047208309, 0.0010383072076365352, 0.0345819890499115, -0.004831307101994753, 0.0017163853626698256...
[ "False", "GenerationMixin", "Linear", "Model", "ModelCausalLMOutputWithPast", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "Model_compression_mask", "Model_grid_thw", "Model_hidden_states", "Model_idx", "Model_mask", "Model_merge_sizes", "Model_nums", "Model_t...
minimax/modeling_minimax.py:MiniMaxRMSNorm
[ -0.00009884802420856431, 0.04179859161376953, 0.03230918198823929, 0.053095508366823196, -0.00046952810953371227, 0.03931327164173126, 0.022028988227248192, -0.02937198430299759, 0.007964326068758965, 0.042476408183574677, 0.019995542243123055, 0.006975846365094185, 0.0028524715453386307, ...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
minimax/modeling_minimax.py:MiniMaxCache
[ -0.00023054851044435054, -0.02951020933687687, 0.02035975642502308, 0.002230422804132104, -0.0008721524500288069, 0.025621267035603523, -0.007063005119562149, -0.051242534071207047, -0.0018014953238889575, 0.00817821640521288, -0.013496916741132736, 0.00806383602321148, -0.001515543670393526...
[ "DynamicCache", "ModelCache", "None", "RuntimeError", "_", "__init__", "__len__", "append", "batch_repeat_interleave", "batch_select_indices", "class", "crop", "def", "dim", "doesnot", "else", "for", "get_linear_cache", "if", "in", "indices", "layer_idx", "len", "linear...
minimax/modeling_minimax.py:MiniMaxLightningAttention
[ -0.00013516386388801038, 0.017979443073272705, 0.026686470955610275, -0.012042835354804993, -0.0005265206564217806, 0.052242156118154526, 0.028495721518993378, -0.01537864375859499, 0.004721017554402351, -0.0005265206564217806, 0.01362593099474907, 0.010742435231804848, 0.0026432042941451073...
[ "ACT2FN", "F", "Linear", "ModelLightningAttention", "ModelRMSNorm", "Module", "None", "Tensor", "__init__", "act_fn", "append", "arange", "attention_mask", "attn_output", "attn_output_inter", "attn_output_intra", "attn_weights_inter", "attn_weights_intra", "base", "batch_size",...
minimax/modeling_minimax.py:MiniMaxRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
minimax/modeling_minimax.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
minimax/modeling_minimax.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]