Dataset preview. Each record has three columns:

| Column     | Type           | Lengths (min–max) |
|------------|----------------|-------------------|
| identifier | string         | 24–102            |
| embedding  | list of float  | 2,560–2,560       |
| tokens     | list of string | 4–448             |

Each record below is shown as three lines: its `identifier` (a `modeling_*.py` file plus the name of a class or function defined in it), a truncated preview of its fixed 2,560-dimensional `embedding`, and a truncated preview of its `tokens` list (the identifiers appearing in that object's source, deduplicated and sorted).
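With one fixed-size `embedding` per code object, the natural use of records shaped like this is similarity search. Note also that in the `tokens` column each object's own name prefix appears replaced by `Model` (e.g. `EomtHungarianMatcher` → `ModelHungarianMatcher`, `dice_loss` → `Model_loss`), presumably to avoid trivial matches on the name itself. Below is a minimal sketch of comparing such records; the dataset's name and loading mechanism are not given here, so the `records` list is a hypothetical stand-in shaped like the rows in this preview:

```python
import numpy as np

def cosine_similarity(a, b) -> float:
    """Cosine similarity between two embedding rows (2,560 floats each here)."""
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

def nearest(query_embedding, records, k=3):
    """Rank records by embedding similarity to a query embedding."""
    scored = [
        (cosine_similarity(query_embedding, r["embedding"]), r["identifier"])
        for r in records
    ]
    return sorted(scored, key=lambda s: s[0], reverse=True)[:k]

# Hypothetical records shaped like this preview:
# records = [
#     {"identifier": "eomt/modeling_eomt.py:dice_loss",
#      "embedding": [4.28e-05, 0.0321, ...],   # 2,560 floats
#      "tokens": ["Model_loss", "def", "denominator", ...]},
#     ...
# ]
```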
eomt/modeling_eomt.py:EomtHungarianMatcher
[ -0.0001790501846699044, 0.011178349144756794, 0.01247031893581152, -0.015278949402272701, -0.0005090642371214926, 0.02965913712978363, 0.02965913712978363, -0.03437763452529907, 0.011290693655610085, 0.006375590804964304, 0.012020938098430634, 0.008032682351768017, -0.004100600257515907, -...
[ "All", "False", "ModelHungarianMatcher", "Module", "None", "ValueError", "__init__", "align_corners", "and", "append", "as_tensor", "assigned_indices", "batch_size", "be", "can", "class", "class_labels", "class_queries_logits", "cost_class", "cost_dice", "cost_mask", "cost_...
eomt/modeling_eomt.py:dice_loss
[ 0.00004283144880901091, 0.0321032777428627, 0.04227684810757637, -0.013282164931297302, 0.0002755342866294086, 0.06646734476089478, -0.0032075014896690845, 0.007036721333861351, 0.02306010015308857, -0.005482425447553396, 0.031877197325229645, -0.014243002980947495, 0.0038716099224984646, ...
[ "Model_loss", "def", "denominator", "flatten", "inputs", "labels", "loss", "num_masks", "numerator", "probs", "r", "return", "sigmoid", "sum" ]
eomt/modeling_eomt.py:sigmoid_cross_entropy_loss
[ 0.00011917051597265527, 0.033199504017829895, 0.04890197142958641, -0.002635771408677101, 0.0002541136054787785, 0.05473431572318077, 0.001710447366349399, 0.02411450445652008, 0.017721356824040413, -0.004234058316797018, 0.03073197230696678, -0.014300461858510971, 0.002257229760289192, -0...
[ "BCEWithLogitsLoss", "Model_cross_entropy_loss", "criterion", "cross_entropy_loss", "def", "inputs", "labels", "loss", "mean", "nn", "none", "num_masks", "r", "reduction", "return", "sum" ]
eomt/modeling_eomt.py:EomtLoss
[ -0.00010183797712670639, 0.03676612302660942, 0.005960568320006132, -0.0075760493054986, -0.0001088012577383779, 0.04478782042860985, 0.0013230233453214169, 0.015820574015378952, 0.005626330617815256, 0.000543135916814208, 0.0033702277578413486, 0.01838306151330471, -0.003133476246148348, ...
[ "CrossEntropyLoss", "False", "ModelHungarianMatcher", "ModelLoss", "Module", "None", "PartialState", "Tensor", "_", "__init__", "_get_predictions_permutation_indices", "_get_targets_permutation_indices", "_max_by_axis", "_pad_images_to_max_in_batch", "_shared_state", "abs", "align_co...
eomt/modeling_eomt.py:EomtPatchEmbeddings
[ -0.00005762737418990582, 0.010473225265741348, 0.017567990347743034, 0.010811070911586285, 0, -0.0070666116662323475, 0.011655686423182487, -0.01801845245063305, 0.005771535448729992, 0.0014217684511095285, 0.00962861068546772, 0.005461843218654394, -0.003631844185292721, -0.00720738060772...
[ "Conv2d", "Expected", "Iterable", "Make", "ModelPatchEmbeddings", "Module", "ValueError", "__init__", "abc", "but", "channel", "class", "collections", "config", "configuration", "def", "dimension", "else", "embeddings", "f", "flatten", "forward", "got", "hidden_size", ...
eomt/modeling_eomt.py:EomtEmbeddings
[ -0.00017601998115424067, 0.005406201351433992, 0.013359827920794487, 0.020266178995370865, -0.0009482080931775272, 0.027172530069947243, 0.029210470616817474, -0.014492016285657883, 0.009510385803878307, 0.003849441884085536, 0.016077080741524696, 0.018454676494002342, -0.0013090933207422495...
[ "Dropout", "Embedding", "False", "ModelEmbeddings", "ModelPatchEmbeddings", "Module", "Parameter", "_", "__init__", "arange", "batch_size", "cat", "class", "cls_token", "cls_tokens", "config", "def", "dim", "dropout", "dtype", "embeddings", "expand", "forward", "hidden_...
eomt/modeling_eomt.py:eager_attention_forward
[ 0.000051929029723396525, 0.031000085175037384, 0.02477744035422802, -0.010239078663289547, 0.00008883178816176951, 0.035978201776742935, 0.05928483232855797, -0.020365018397569656, 0.022175243124365807, 0.01170988567173481, 0.02477744035422802, 0.031452640891075134, 0.0025173425674438477, ...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "softmax", "...
eomt/modeling_eomt.py:EomtAttention
[ -0.00004431658089742996, 0.03145160898566246, 0.041561052203178406, -0.0012777216034010053, -0.00009477604908170179, 0.02561059594154358, 0.04245967045426369, -0.002906465670093894, 0.003103038063272834, 0.023813361302018166, 0.01729838363826275, 0.010783408768475056, -0.0034119379706680775,...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelAttention", "Module", "None", "ValueError", "__init__", "_attn_implementation", "and", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "batch_size", "be", "by", "class", "config", ...
eomt/modeling_eomt.py:EomtLayerScale
[ 0, 0.02298853173851967, 0.024222061038017273, 0.020857887342572212, 0, 0.032071802765131, 0.031174691393971443, -0.0167087372392416, 0.005382680334150791, 0.0331931971013546, 0.0029576707165688276, -0.027922654524445534, 0.0015419136034324765, -0.01626018062233925, -0.030726134777069092,...
[ "ModelLayerScale", "Module", "Parameter", "__init__", "class", "config", "def", "forward", "hidden_size", "hidden_state", "lambda1", "layerscale_value", "nn", "ones", "return", "self", "super", "torch" ]
eomt/modeling_eomt.py:drop_path
[ 0, 0.017753854393959045, 0.03775503486394882, -0.02123720571398735, 0, 0.043598074465990067, 0.0339345820248127, -0.027304979041218758, 0.014832334592938423, 0.0017486985307186842, 0.020113544538617134, -0.04944111406803131, 0.0016574009787291288, -0.018315685912966728, -0.05034004524350...
[ "False", "Model_path", "Model_prob", "def", "device", "div", "dtype", "floor_", "if", "input", "keep_prob", "ndim", "not", "or", "output", "rand", "random_tensor", "return", "shape", "torch", "training" ]
eomt/modeling_eomt.py:EomtDropPath
[ -0.00011107319733127952, 0.018094833940267563, 0.03596488758921623, -0.025063030421733856, -0.00047414645086973906, 0.031918834894895554, 0.033717080950737, -0.026861274614930153, 0.0066872211173176765, -0.002430439693853259, 0.009721758775413036, -0.033717080950737, -0.000818341679405421, ...
[ "ModelDropPath", "Module", "None", "__init__", "class", "def", "drop_path", "drop_prob", "extra_repr", "f", "forward", "hidden_states", "nn", "p", "return", "self", "super", "training" ]
eomt/modeling_eomt.py:EomtMLP
[ -0.00027111603412777185, 0.02872752584517002, 0.04113781824707985, 0.018500525504350662, -0.0009767358424142003, 0.0457342192530632, 0.02596968226134777, -0.02631441317498684, 0.0004362993058748543, -0.012582655996084213, 0.03493266925215721, -0.031025728210806847, 0.0005996871041134, -0.0...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "activation", "class", "config", "def", "else", "fc1", "fc2", "forward", "hidden_act", "hidden_features", "hidden_size", "hidden_state", "if", "in_features", "int", "isinstance", "mlp_ratio", "nn", "out_features", ...
eomt/modeling_eomt.py:EomtSwiGLUFFN
[ -0.00014910918253008276, 0.03294603154063225, 0.007668472360819578, 0.014996124431490898, -0.00039052407373674214, 0.02590239606797695, 0.033400457352399826, -0.027038466185331345, 0.0038910398725420237, -0.004572681616991758, 0.03135553374886513, -0.018063513562083244, 0.0021301312372088432...
[ "Linear", "ModelSwiGLUFFN", "Module", "__init__", "chunk", "class", "config", "def", "dim", "forward", "functional", "hidden", "hidden_features", "hidden_size", "hidden_state", "in_features", "int", "mlp_ratio", "nn", "out_features", "return", "self", "silu", "super", ...
eomt/modeling_eomt.py:EomtLayer
[ 0, 0.021654782816767693, 0.02086937613785267, 0.01150059700012207, 0.00019459851318970323, 0.04757320135831833, 0.008583372458815575, -0.00222999369725585, 0.0038709326181560755, 0.005946650169789791, 0.02109377644956112, 0.00045231005060486495, 0.0018793657654896379, -0.002272068988531828...
[ "GradientCheckpointingLayer", "Identity", "LayerNorm", "ModelAttention", "ModelDropPath", "ModelLayer", "ModelLayerScale", "ModelMLP", "ModelSwiGLUFFN", "None", "_", "__init__", "attention", "attention_mask", "class", "config", "def", "drop_path", "drop_path_rate", "else", "e...
eomt/modeling_eomt.py:EomtLayerNorm2d
[ 0.00002139051321137231, 0.0296544898301363, 0.032125696539878845, 0.03504621610045433, 0.00018340993847232312, 0.025161385536193848, 0.005812954157590866, -0.03594483807682991, 0.01988198794424534, 0.046728286892175674, 0.015501211397349834, 0.007245131302624941, 0.001593648106791079, 0, ...
[ "F", "LayerNorm", "ModelLayerNorm2d", "True", "__init__", "affine", "bias", "class", "def", "elementwise_affine", "eps", "forward", "hidden_state", "layer_norm", "nn", "normalized_shape", "num_channels", "permute", "return", "self", "super", "weight" ]
eomt/modeling_eomt.py:EomtScaleLayer
[ -0.00010110923176398501, 0.022057639434933662, 0.023858262225985527, -0.004192076623439789, -0.00038509428850375116, 0.03376169130206108, 0.023745723068714142, -0.05379362776875496, 0.014630066230893135, 0.03466200456023216, 0.017218463122844696, 0.007596380542963743, 0.0022789142094552517, ...
[ "ACT2FN", "Conv2d", "ConvTranspose2d", "ModelLayerNorm2d", "ModelScaleLayer", "Module", "__init__", "activation", "class", "config", "conv1", "conv2", "def", "forward", "groups", "hidden_act", "hidden_size", "hidden_states", "kernel_size", "layernorm2d", "nn", "padding", ...
eomt/modeling_eomt.py:EomtScaleBlock
[ -0.0001351948594674468, -0.019453920423984528, 0.002643810585141182, 0.02262083813548088, -0.00038702841266058385, 0.03868163377046585, 0.023978088051080704, -0.050218261778354645, 0.007691084872931242, 0.002728638704866171, 0.014703544788062572, -0.036645758897066116, 0.0006680216174572706,...
[ "ModelScaleBlock", "ModelScaleLayer", "Module", "ModuleList", "_", "__init__", "block", "class", "config", "def", "for", "forward", "hidden_states", "in", "nn", "num_blocks", "num_upscale_blocks", "range", "return", "self", "super" ]
eomt/modeling_eomt.py:EomtMaskHead
[ -0.00015040939615573734, 0.015628421679139137, 0.03918430209159851, -0.014326052740216255, -0.0004954662872478366, 0.04620576649904251, 0.028199108317494392, -0.03782530874013901, 0.008833455853164196, -0.006257031112909317, 0.02106439508497715, 0.0054925973527133465, 0.0012174313887953758, ...
[ "ACT2FN", "Linear", "ModelMaskHead", "Module", "__init__", "activation", "class", "config", "def", "fc1", "fc2", "fc3", "forward", "hidden_act", "hidden_size", "hidden_states", "nn", "return", "self", "super" ]
eomt/modeling_eomt.py:EomtPreTrainedModel
[ -0.00024628776009194553, 0.05715293809771538, -0.009638888761401176, 0.020298365503549576, -0.0008575775427743793, 0.024607514962553978, 0.020411763340234756, -0.024040522053837776, 0.00022502552019432187, 0.0028491420671343803, 0.017123201861977577, -0.012190358713269234, -0.003600408323109...
[ "Conv2d", "ConvTranspose2d", "Embedding", "False", "LayerNorm", "Linear", "Model", "ModelAttention", "ModelConfig", "ModelEmbeddings", "ModelForUniversalSegmentation", "ModelLayer", "ModelLayerScale", "ModelLoss", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "_", ...
eomt/modeling_eomt.py:EomtForUniversalSegmentation
[ -0.00022409313532989472, 0.043989572674036026, -0.012301207520067692, 0.016099276021122932, -0.0010628923773765564, 0.036733560264110565, 0.021768035367131233, -0.022788411006331444, -0.0037413809914141893, -0.0028202077373862267, 0.028910672292113304, 0.017799904569983482, -0.00433660065755...
[ "Embedding", "F", "LayerNorm", "Linear", "ModelEmbeddings", "ModelForUniversalSegmentation", "ModelForUniversalSegmentationOutput", "ModelLayer", "ModelLoss", "ModelMaskHead", "ModelPreTrainedModel", "ModelScaleBlock", "ModuleList", "None", "Tensor", "_", "__init__", "_disable_atte...
chameleon/modeling_chameleon.py:ChameleonVQVAEModelOutput
[ -0.0001690711360424757, 0.02130119316279888, 0.016315806657075882, 0.0062317317351698875, -0.0008851892198435962, 0.043962035328149796, 0.04622812196612358, -0.020847976207733154, 0.012803376652300358, -0.00753473024815321, 0.025833360850811005, 0.02821275033056736, -0.003908995538949966, ...
[ "BaseModelOutputWithPooling", "ModelVQVAEModelOutput", "None", "class", "embedding_loss", "image_tokens", "quantized_last_hidden_state", "r" ]
chameleon/modeling_chameleon.py:ChameleonRMSNorm
[ -0.00009884802420856431, 0.04179859161376953, 0.03230918198823929, 0.053095508366823196, -0.00046952810953371227, 0.03931327164173126, 0.022028988227248192, -0.02937198430299759, 0.007964326068758965, 0.042476408183574677, 0.019995542243123055, 0.006975846365094185, 0.0028524715453386307, ...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
chameleon/modeling_chameleon.py:ChameleonRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
chameleon/modeling_chameleon.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
chameleon/modeling_chameleon.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
chameleon/modeling_chameleon.py:ChameleonMLP
[ -0.0002508739707991481, 0.025181248784065247, 0.022293491289019585, 0.02414165623486042, -0.0009818377438932657, 0.05960332229733467, 0.038118403404951096, -0.0035808198153972626, 0.0006569649558514357, -0.0040428610518574715, 0.026913903653621674, -0.04574208706617355, -0.001653241459280252...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
chameleon/modeling_chameleon.py:ChameleonLayerNorm
[ 0, 0.03650980070233345, 0.029342232272028923, 0.0423334501683712, -0.00009186946408590302, 0.025870440527796745, 0.019710812717676163, -0.033597975969314575, 0.012935220263898373, 0.04166148975491524, 0.010023396462202072, 0.013663176447153091, 0.004003758542239666, -0.007951521314680576, ...
[ "F", "LayerNorm", "ModelLayerNorm", "None", "__init__", "args", "bias", "class", "def", "eps", "forward", "hidden_size", "hidden_states", "kwargs", "layer_norm", "nn", "normalized_shape", "return", "self", "super", "weight" ]
chameleon/modeling_chameleon.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
chameleon/modeling_chameleon.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
chameleon/modeling_chameleon.py:ChameleonAttention
[ -0.00014282259508036077, 0.034531332552433014, 0.022230954840779305, 0.0008886739960871637, -0.0006100817117840052, 0.02629346400499344, 0.03362855315208435, -0.013259579427540302, 0.002383903134614229, 0.023246582597494125, 0.010494816116988659, 0.021215327084064484, -0.0010367862414568663,...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Instantiating", "Linear", "ModelAttention", "ModelLayerNorm", "Module", "None", "Please", "True", "ValueError", "_", "__class__", "__init__", "__name__", "_attn_implementation", "a", "and", "apply_rotary_pos_emb", "attention_dropout", "att...
chameleon/modeling_chameleon.py:ChameleonDecoderLayer
[ -0.00011197099956916645, 0.04468965530395508, 0.010269592516124249, 0.001756269601173699, -0.0006665360415354371, 0.0372413769364357, 0.04243260249495506, -0.03453291580080986, 0.007899686694145203, -0.004429467022418976, -0.002849529730156064, 0.022006269544363022, -0.001565830665640533, ...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "if", "input_layernorm", "kwargs", "la...
chameleon/modeling_chameleon.py:ChameleonSwinDecoderLayer
[ -0.000011153133527841419, 0.04545929655432701, 0.016123494133353233, 0.013604198582470417, -0.00009491098171565682, 0.028887927532196045, 0.03739754855632782, -0.021050117909908295, 0.01091694924980402, -0.006438200827687979, -0.0012106617214158177, 0.019706493243575096, -0.00106370274443179...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelMLP", "ModelRMSNorm", "ModelSwinDecoderLayer", "None", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "if", "input_layernorm", "kwargs", ...
chameleon/modeling_chameleon.py:ChameleonVQVAEVectorQuantizer
[ -0.00019856782455462962, 0.023054955527186394, 0.029465356841683388, 0.003767517162486911, -0.0006958660087548196, 0.02091815508902073, 0.027665946632623672, -0.02901550382375717, 0.002811579965054989, 0.019793521612882614, 0.048809025436639786, 0.011808635666966438, 0.0007099239155650139, ...
[ "Embedding", "ModelVQVAEVectorQuantizer", "Module", "True", "__init__", "argmin", "bd", "beta", "bn", "class", "config", "contiguous", "def", "detach", "dim", "distances", "dn", "einsum", "embed_dim", "embedding", "embedding_dim", "forward", "getattr", "hidden_state", ...
chameleon/modeling_chameleon.py:ChameleonVQVAEEncoderConvDownsample
[ -0.000030870887712808326, 0.01970171369612217, 0.037399861961603165, 0.006177655886858702, -0.00008826464909361675, 0.01385798491537571, 0.023931460455060005, -0.056099794805049896, 0.015249349176883698, 0.022484440356492996, 0.024376695975661278, -0.0022400959860533476, 0.002782727824524045...
[ "Conv2d", "F", "ModelVQVAEEncoderConvDownsample", "Module", "__init__", "class", "constant", "conv", "def", "forward", "hidden_states", "in_channels", "kernel_size", "mode", "nn", "pad", "padding", "return", "self", "stride", "super", "value" ]
chameleon/modeling_chameleon.py:ChameleonVQVAEEncoderResnetBlock
[ -0.00016613112529739738, 0.008370180614292622, 0.006871466059237719, -0.00424164580181241, -0.0004276992694940418, 0.038231365382671356, 0.014308484271168709, -0.030539847910404205, 0, 0.008709512650966644, 0.04411311447620392, -0.011480720713734627, 0.001809768844395876, -0.00910539925098...
[ "Conv2d", "Dropout", "False", "GroupNorm", "ModelVQVAEEncoderResnetBlock", "Module", "None", "True", "__init__", "affine", "class", "config", "conv1", "conv2", "conv_shortcut", "def", "dropout", "else", "eps", "forward", "hidden_states", "if", "in_channels", "is", "ke...
chameleon/modeling_chameleon.py:ChameleonVQVAEEncoderAttnBlock
[ -0.00009778426465345547, 0.008665190078318119, 0.03284843638539314, -0.019029438495635986, -0.0005132568185217679, 0.022087739780545235, 0.03692617267370224, -0.03828541561961174, 0.007419215515255928, 0.020955035462975502, 0.03171572834253311, 0.018576355651021004, -0.0016495011514052749, ...
[ "Conv2d", "F", "GroupNorm", "ModelVQVAEEncoderAttnBlock", "Module", "True", "__init__", "affine", "attn_output", "attn_weights", "batch_size", "bmm", "channels", "class", "def", "dim", "eps", "forward", "height", "hidden_states", "in_channels", "int", "k", "kernel_size"...
chameleon/modeling_chameleon.py:ChameleonVQVAEEncoder
[ -0.00008092795906122774, 0.017001910135149956, 0.014187023974955082, 0.021956108510494232, -0.000098960823379457, 0.04120992496609688, 0.027923665940761566, -0.03490458056330681, 0.0023222805466502905, 0.016438931226730347, 0.04346183314919472, 0.008219465613365173, 0.000016273557776003145, ...
[ "Conv2d", "GroupNorm", "Identity", "ModelVQVAEEncoder", "ModelVQVAEEncoderAttnBlock", "ModelVQVAEEncoderConvDownsample", "ModelVQVAEEncoderResnetBlock", "Module", "ModuleList", "None", "True", "__init__", "affine", "and", "append", "attn", "attn_1", "attn_resolutions", "attn_type...
chameleon/modeling_chameleon.py:ChameleonImageVocabularyMapping
[ -0.00014094171638134867, 0.011232788674533367, 0.009247194975614548, -0.02666369080543518, -0.0007658719550818205, 0.008679882623255253, 0.03789648041129112, -0.037669554352760315, 0.0012835446977987885, 0.00253872387111187, 0.029954103752970695, 0.0229194276034832, -0.002311798743903637, ...
[ "A", "IMGIMG", "ModelImageVocabularyMapping", "bpe2img", "bpe2img_search_tensors", "c", "cached_property", "chr", "class", "convert_img2bpe", "cpu", "def", "device", "dtype", "for", "get", "i", "if", "image", "image_token_id", "image_tokens", "img2bpe", "img2bpe_mapping_t...
chameleon/modeling_chameleon.py:ChameleonPreTrainedModel
[ -0.00034034677082672715, 0.03028540126979351, 0.01758882962167263, 0.018986618146300316, -0.001951078767888248, 0.03238208219408989, 0.02341294474899769, -0.03284801170229912, -0.002125802217051387, 0.009668032638728619, 0.01025044359266758, 0.004630172159522772, -0.004775775130838156, 0.0...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "ModelSwinDecoderLayer", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_su...
chameleon/modeling_chameleon.py:ChameleonVQVAE
[ -0.0000834201491670683, 0.018320821225643158, -0.0011029021115973592, 0.01708444580435753, -0.00008912783960113302, 0.040013570338487625, 0.04068795591592789, -0.03057216666638851, 0.0041587138548493385, 0.01865801401436329, 0.03731602802872658, 0.02180514857172966, -0.002416549948975444, ...
[ "Conv2d", "ModelPreTrainedModel", "ModelVQVAE", "ModelVQVAEConfig", "ModelVQVAEEncoder", "ModelVQVAEEncoderAttnBlock", "ModelVQVAEEncoderResnetBlock", "ModelVQVAEModelOutput", "ModelVQVAEVectorQuantizer", "__init__", "_can_record_outputs", "_no_split_modules", "attentions", "check_model_in...
chameleon/modeling_chameleon.py:ChameleonModel
[ -0.000035578104871092364, 0.041125670075416565, -0.013298789970576763, -0.002151274820789695, -0.00020255509298294783, 0.02883267030119896, 0.03799654170870781, -0.02637406997382641, 0.007375799585133791, 0.012237121351063251, 0.03643197938799858, 0.017433708533644676, -0.001788072637282312,...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "Image", "ModelDecoderLayer", "ModelImageVocabularyMapping", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModelSwinDecoderLayer", "ModelVQVAE", "ModuleList", "None", "Setting", "Tokeniz...
chameleon/modeling_chameleon.py:ChameleonForConditionalGeneration
[ -0.00017945481522474438, 0.041010238230228424, 0.006863052025437355, 0.004229881335049868, -0.0006127725355327129, 0.019272571429610252, 0.030477553606033325, -0.038993339985609055, 0.00023810588754713535, 0.017927972599864006, 0.03720054402947426, 0.018264122307300568, 0.0016457318561151624...
[ "CausalLMOutputWithPast", "False", "GenerationMixin", "Linear", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_tied_weights_keys", "and", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tuple", "c...
fuyu/modeling_fuyu.py:FuyuPreTrainedModel
[ -0.0003617391630541533, 0.030015261843800545, 0.014367771334946156, 0.015472983941435814, -0.0017814290476962924, 0.03187667578458786, 0.023151306435465813, -0.023732997477054596, -0.002268595388159156, 0.01489129289984703, 0.013727910816669464, 0.006922124419361353, -0.00328655494377017, ...
[ "Model", "ModelConfig", "ModelPreTrainedModel", "PreTrainedModel", "True", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supports_sdpa", "base_model_prefix", "class", "config", "image", "input_modalit...
fuyu/modeling_fuyu.py:FuyuModel
[ -0.00008111935312626883, 0.037290479987859726, -0.0004919496132060885, 0.007257129065692425, -0.0003942574840039015, 0.030368294566869736, 0.0024004350416362286, -0.02657225728034973, 0, 0.00837361067533493, 0.027688737958669662, 0.02065490558743477, -0.000423914025304839, 0.01697051711380...
[ "AutoModel", "BaseModelOutputWithPooling", "Batch", "False", "Got", "Image", "Linear", "ModelModel", "ModelPreTrainedModel", "None", "Number", "True", "ValueError", "_", "__init__", "_checkpoint_conversion_mapping", "all", "and", "arange", "as_tuple", "attention_mask", "aut...
fuyu/modeling_fuyu.py:FuyuForCausalLM
[ -0.00008987155160866678, 0.04445064440369606, 0.007762109860777855, 0.007538740057498217, -0.00048164170584641397, 0.021331841126084328, 0.011280188336968422, -0.026022613048553467, 0.004299873951822519, 0.015189165249466896, 0.030154960229992867, 0.005807621870189905, 0.0015566102229058743,...
[ "CausalLMOutputWithPast", "False", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_checkpoint_conversion_mapping", "_tied_weights_keys", "and", "attention_mask", "attentions", "auto_docstring", "cache_position", "...
aria/modeling_aria.py:AriaTextRMSNorm
[ -0.0000789765763329342, 0.04924608767032623, 0.03659571707248688, 0.056474871933460236, -0.00048356608022004366, 0.04337270185351372, 0.019879154860973358, -0.029592832550406456, 0.00677698478102684, 0.04201730340719223, 0.028011536225676537, 0.016377713531255722, 0.0027672688011080027, 0....
[ "ModelTextRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tupl...
aria/modeling_aria.py:AriaProjectorMLP
[ -0.00035906347329728305, 0.02263362891972065, 0.01974668726325035, 0.03810764104127884, -0.0009310389286838472, 0.047345858067274094, 0.034874264150857925, -0.018591908738017082, -0.009642387740314007, 0.003637547604739666, 0.02217171899974346, -0.0321028009057045, -0.004936671815812588, 0...
[ "ACT2FN", "Linear", "ModelProjectorMLP", "Module", "__init__", "act", "class", "def", "forward", "gelu_new", "hidden_features", "hidden_states", "in_features", "linear_in", "linear_out", "nn", "output_dim", "return", "self", "super" ]
aria/modeling_aria.py:AriaCrossAttention
[ -0.00016990643052849919, 0.049159593880176544, 0.05051884427666664, -0.012006721459329128, -0.0008672307594679296, 0.025825778022408485, 0.023220544680953026, -0.03556707873940468, -0.001585793332196772, 0.03262203559279442, 0.026052318513393402, 0.028770823031663895, -0.0008920087711885571,...
[ "Dropout", "LayerNorm", "Linear", "ModelCrossAttention", "Module", "MultiheadAttention", "None", "True", "_", "__init__", "attn_mask", "attn_output", "batch_first", "class", "config", "def", "dropout", "dropout_rate", "forward", "hidden_size", "hidden_states", "k_proj", "...
aria/modeling_aria.py:AriaProjector
[ -0.00029180783894844353, 0.0419064536690712, 0.0038433228619396687, 0.009679479524493217, -0.0009003339218907058, 0.02209198847413063, 0.04122319445014, -0.0015302118845283985, -0.004469641949981451, 0.019017331302165985, 0.0024483390152454376, 0.01503166276961565, -0.0026760913897305727, ...
[ "KeyError", "LayerNorm", "ModelCrossAttention", "ModelProjector", "ModelProjectorMLP", "Module", "None", "Number", "Parameter", "__init__", "amongst", "attention_out", "attn_mask", "batch_size", "class", "config", "cross_attn", "def", "expand", "f", "feed_forward", "forward...
aria/modeling_aria.py:AriaSharedExpertsMLP
[ -0.0004874992882832885, 0.04239382967352867, 0.016076311469078064, 0.018577072769403458, -0.0017713714623823762, 0.058827392756938934, 0.035963304340839386, -0.009824412874877453, -0.00857403315603733, -0.01536180917173624, 0.02286408841609955, -0.03691597655415535, -0.002024424495175481, ...
[ "ACT2FN", "Linear", "ModelSharedExpertsMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "moe_num_shared_experts", "nn", "return", "self", "super", "up_proj", "x" ]
aria/modeling_aria.py:sequential_experts_gemm
[ -0.00024558481527492404, 0.02238885499536991, -0.01017675269395113, -0.013682078570127487, -0.0007985924021340907, 0.04093316197395325, 0.031208708882331848, -0.019109679386019707, 0.0023463068064302206, -0.014417066238820553, 0.02691185660660267, -0.007632564287632704, -0.001349833211861550...
[ "Model_experts_gemm", "cat", "cumsum", "cumsum_num_tokens", "def", "device", "dim", "dtype", "end", "expert_num", "expert_weights", "for", "in", "long", "matmul", "num_tokens", "out", "out_features", "output", "range", "return", "shape", "start", "token_states", "toke...
aria/modeling_aria.py:AriaGroupedExpertsGemm
[ -0.0001837999589042738, 0.012215628288686275, -0.022847747430205345, 0.009105167351663113, -0.0005902806296944618, 0.039587683975696564, 0.03664688393473625, -0.009783812798559666, -0.0018379995599389076, -0.0240919329226017, 0.007804428692907095, -0.022734640166163445, -0.003732552984729409...
[ "ModelGroupedExpertsGemm", "Module", "Parameter", "__init__", "class", "cpu", "def", "empty", "forward", "groups", "in_features", "input", "nn", "out_features", "return", "self", "sequential_experts_gemm", "super", "tokens_per_expert", "torch", "weight" ]
aria/modeling_aria.py:AriaExperts
[ -0.000383338745450601, 0.03404194861650467, -0.015847112983465195, 0.0015406915917992592, -0.0015406915917992592, 0.0727793350815773, 0.05681483820080757, -0.018546992912888527, -0.0022743542212992907, -0.01655142940580845, 0.025472767651081085, -0.013205927796661854, -0.0029639971908181906,...
[ "ModelExperts", "ModelGroupedExpertsGemm", "Module", "__init__", "argsort", "bins", "chunk", "class", "config", "def", "device", "dim", "dtype", "expert_output", "fc1", "fc1_output", "fc2", "flatten", "flatten_indices", "float32", "forward", "functional", "gate", "hidde...
aria/modeling_aria.py:AriaTextMoELayer
[ -0.00046437722630798817, 0.045650139451026917, 0.01390902604907751, 0.015929996967315674, -0.0017460583476349711, 0.07180386036634445, 0.03661521524190903, -0.03566417098045349, -0.006389830727130175, -0.006508711259812117, 0.02900685928761959, -0.024964919313788414, -0.0008953192736953497, ...
[ "Linear", "ModelExperts", "ModelSharedExpertsMLP", "ModelTextMoELayer", "Module", "__init__", "class", "config", "def", "expert_output", "experts", "forward", "hidden_size", "hidden_states", "moe_num_experts", "nn", "original_shape", "return", "router", "router_logits", "self...
aria/modeling_aria.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
aria/modeling_aria.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
aria/modeling_aria.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
aria/modeling_aria.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
aria/modeling_aria.py:AriaTextAttention
[ -0.00006710349407512695, 0.039746321737766266, 0.040419988334178925, -0.009992719627916813, -0.0003806917229667306, 0.02582388184964657, 0.04176732152700424, -0.02054682746529579, 0.001796443946659565, 0.00522091519087553, 0.021669605746865273, 0.03637798875570297, -0.002006964758038521, -...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelTextAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache_position", ...
aria/modeling_aria.py:AriaTextDecoderLayer
[ -0.00013269903138279915, 0.04949410259723663, 0.022272346541285515, -0.0006397675024345517, -0.0004710376088041812, 0.03667062893509865, 0.035320792347192764, -0.045444585382938385, 0.0031074420548975468, -0.004527585580945015, 0.013160931877791882, 0.023847157135605812, -0.00331835448741912...
[ "False", "GradientCheckpointingLayer", "ModelTextAttention", "ModelTextDecoderLayer", "ModelTextMoELayer", "ModelTextRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "inp...
aria/modeling_aria.py:AriaTextPreTrainedModel
[ -0.0001975717896129936, 0.05125880613923073, 0.00024807220324873924, 0.005443412810564041, -0.000992288812994957, 0.028464514762163162, 0.01814470998942852, -0.02812430076301098, -0.004195964429527521, 0.0004536177439149469, 0.01128374133259058, 0.008278523571789265, -0.003940804395824671, ...
[ "ModelGroupedExpertsGemm", "ModelTextAttention", "ModelTextConfig", "ModelTextDecoderLayer", "ModelTextPreTrainedModel", "PreTrainedModel", "True", "_can_record_outputs", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn...
aria/modeling_aria.py:AriaPreTrainedModel
[ -0.000298285303870216, 0.04183853045105934, 0.0100595373660326, 0.0026434864848852158, -0.0010859727626666427, 0.031550366431474686, 0.034751128405332565, -0.026063347235322, -0.006001428700983524, 0.010516789741814137, 0.0007037389441393316, 0, -0.005429863929748535, -0.001578947296366095...
[ "False", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "ModelProjector", "ModelTextAttention", "ModelTextDecoderLayer", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_support...
aria/modeling_aria.py:AriaTextRotaryEmbedding
[ -0.00031274184584617615, 0.0535297654569149, 0.009600447490811348, -0.013556995429098606, -0.0016437129816040397, 0.04259107634425163, 0.03723809868097305, -0.0055275303311645985, -0.0055566225200891495, 0.022808335721492767, 0.0075639886781573296, -0.0030983262695372105, -0.0019055433804169...
[ "False", "ModelTextRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "devic...
aria/modeling_aria.py:AriaTextModel
[ -0.00012039471039315686, 0.05196833238005638, 0.007199076469987631, -0.006552284583449364, -0.0006538223824463785, 0.024859311059117317, 0.046793997287750244, -0.025759195908904076, 0.010517400689423084, 0.0077615040354430676, 0.024071911349892616, 0.010798614472150803, -0.003037110436707735...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelTextDecoderLayer", "ModelTextModel", "ModelTextPreTrainedModel", "ModelTextRMSNorm", "ModelTextRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "auto_do...
aria/modeling_aria.py:AriaTextForCausalLM
[ -0.0002720298944041133, 0.04096450284123421, 0.020140880718827248, -0.004068002570420504, -0.0012090217787772417, 0.02332700788974762, 0.03891627490520477, -0.014906526543200016, 0.002403819700703025, 0.028106199577450752, 0.03140611946582794, 0.006258465349674225, -0.00035381666384637356, ...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelTextForCausalLM", "ModelTextModel", "ModelTextPreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "class", "colwise_gather_...
aria/modeling_aria.py:AriaCausalLMOutputWithPast
[ -0.00020158913685008883, 0.02637176588177681, 0.02978190779685974, 0.009434727020561695, -0.0011793408775702119, 0.03387407958507538, 0.03773890808224678, -0.030463937669992447, 0.01784641109406948, -0.004205842036753893, 0.020006166771054268, 0.018642110750079155, -0.0008596400148235261, ...
[ "ModelCausalLMOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "image_hidden_states", "logits", "loss", "past_key_values", "r" ]
aria/modeling_aria.py:AriaModelOutputWithPast
[ -0.000123380494187586, 0.02538112923502922, 0.014777458272874355, 0.01703355833888054, -0.0007931602885946631, 0.02910369634628296, 0.04331712797284126, -0.038353707641363144, 0.016920752823352814, -0.007952754385769367, 0.02763723023235798, 0.026170765981078148, -0.002354804892092943, 0.0...
[ "BaseModelOutputWithPast", "ModelModelOutputWithPast", "None", "class", "image_hidden_states", "r" ]
aria/modeling_aria.py:AriaModel
[ -0.00008123178849928081, 0.03202704340219498, 0.0028357277624309063, 0.009674835950136185, -0.00009904196485877037, 0.03380632400512695, 0.014178639277815819, -0.04314754530787468, 0.0023214046377688646, 0.024242693558335304, 0.03313909471035004, 0.0151238813996315, 0.00032666471088305116, ...
[ "AutoModel", "False", "Image", "ModelModel", "ModelModelOutputWithPast", "ModelPreTrainedModel", "ModelProjector", "None", "Obtains", "True", "__init__", "_checkpoint_conversion_mapping", "_create_patch_attention_mask", "all", "and", "apply", "attention_mask", "attentions", "attn...
aria/modeling_aria.py:AriaForConditionalGeneration
[ -0.00024328366271220148, 0.03407381847500801, 0.007023493759334087, 0.021211516112089157, -0.0008920401451177895, 0.031817272305488586, 0.029786383733153343, -0.04309999197721481, -0.004174606874585152, 0.028770938515663147, 0.032494235783815384, -0.0018334421329200268, 0.0014667536597698927...
[ "False", "GenerationMixin", "Linear", "ModelCausalLMOutputWithPast", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_checkpoint_conversion_mapping", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "cache_posit...
depth_pro/modeling_depth_pro.py:DepthProOutput
[ -0.00017236349231097847, 0.02740757167339325, 0.022289976477622986, 0.021493904292583466, -0.0010093037271872163, 0.03889373317360878, 0.06004646420478821, -0.02604288049042225, 0.017741000279784203, -0.011315573938190937, 0.009609708562493324, 0.030250681564211845, 0.00021234471932984889, ...
[ "ModelOutput", "None", "attentions", "class", "features", "hidden_states", "last_hidden_state", "r" ]
depth_pro/modeling_depth_pro.py:DepthProDepthEstimatorOutput
[ -0.0001575177739141509, 0.04123583808541298, 0.014922360889613628, 0.02152920886874199, -0.0009967226069420576, 0.06652411818504333, 0.053310420364141464, -0.003445812501013279, 0.01890925131738186, -0.011846760287880898, 0.025402188301086426, 0.02870561182498932, 0.0017229062505066395, 0....
[ "ModelModelEstimatorOutput", "ModelOutput", "None", "attentions", "class", "field_of_view", "hidden_states", "loss", "predicted_Model", "r" ]
depth_pro/modeling_depth_pro.py:split_to_patches
[ 0.00008447738946415484, 0.000879972823895514, -0.00495600700378418, -0.01757129654288292, 0.0007250976050272584, -0.0023090485483407974, 0.014868021011352539, -0.026920128613710403, 0.008729330264031887, 0.0024357647635042667, 0.007490328513085842, -0.019260844215750694, -0.00425202865153551...
[ "F", "Model_to_patches", "batch_size", "def", "height", "if", "kernel_size", "num_channels", "overlap_ratio", "patch_size", "patches", "permute", "pixel_values", "reshape", "return", "shape", "stride", "torch_int", "unfold", "width" ]
depth_pro/modeling_depth_pro.py:reshape_features
[ -0.00006630449934164062, 0.01527655590325594, 0.01810554787516594, 0.009165933355689049, -0.00009415238309884444, 0.025687245652079582, 0.04481123015284538, -0.04707442596554756, 0.019350305199623108, 0.027610961347818375, 0.022971414029598236, 0.011202807538211346, 0.0028855716809630394, ...
[ "Model", "Model_features", "def", "hidden_size", "hidden_states", "n_samples", "permute", "return", "seq_len", "shape", "size", "torch_int" ]
depth_pro/modeling_depth_pro.py:merge_patches
[ 0.000049845504690892994, -0.007195319514721632, -0.010230844840407372, -0.0016512696165591478, 0.00042511409264989197, -0.0006288877921178937, 0.0004303840978536755, -0.028668852522969246, 0.014278212562203407, -0.006633185315877199, -0.018662860617041588, -0.00742017338052392, -0.0006675345...
[ "Model_patches", "Modeld", "_", "append", "batch_size", "box", "box_h", "box_w", "boxes", "boxes_in_row", "cat", "def", "dim", "else", "for", "h", "hidden_size", "i", "if", "in", "min", "n_patches", "n_patches_per_batch", "new_out_size", "out_size", "pad_bottom", ...
depth_pro/modeling_depth_pro.py:reconstruct_feature_maps
[ -0.0001649820915190503, 0.004633539356291294, 0.04088747501373291, 0.024262897670269012, -0.0006072745309211314, -0.00139006192330271, 0.017635531723499298, -0.07189007103443146, 0.007357499096542597, 0.009042422287166119, 0.0004300065047573298, -0.024375226348638535, 0.0029205339960753918, ...
[ "F", "False", "Model_feature_maps", "align_corners", "batch_size", "bilinear", "def", "features", "float", "hidden_state", "interpolate", "merge_patches", "mode", "output_size", "padding", "reshape_features", "return", "size" ]
depth_pro/modeling_depth_pro.py:DepthProPatchEncoder
[ -0.00019317134865559638, 0.029129181057214737, 0.007451651152223349, 0.021225914359092712, -0.0006174426525831223, 0.008411332964897156, 0.027096912264823914, -0.03703244775533676, 0.0009667387930676341, 0.003923407290130854, 0.007395199034363031, 0.019532358273863792, -0.003669373458251357,...
[ "AutoModel", "F", "False", "Image", "ModelPatchEncoder", "Module", "ValueError", "__init__", "align_corners", "append", "base_height", "base_width", "batch_size", "be", "bilinear", "cat", "class", "config", "def", "dim", "encodings", "exponent_value", "f", "features", ...
depth_pro/modeling_depth_pro.py:DepthProImageEncoder
[ -0.00013042592036072165, 0.02980363368988037, 0.0262182354927063, 0.028907284140586853, -0.0006092378171160817, 0.020391959697008133, 0.018039042130112648, -0.04347297176718712, 0.005013957154005766, 0.020504003390669823, 0.008235215209424496, 0.01703064888715744, -0.0006197419133968651, 0...
[ "AutoModel", "F", "False", "ModelImageEncoder", "ModelOutput", "Module", "True", "__init__", "align_corners", "attentions", "base_height", "base_width", "batch_size", "bilinear", "class", "config", "def", "encodings", "exponent_value", "features", "forward", "from_config", ...
depth_pro/modeling_depth_pro.py:DepthProEncoder
[ -0.00012892665108665824, 0.023799683898687363, 0.008363568224012852, 0.02076859213411808, -0.00039993569953367114, 0.023238370195031166, 0.029188290238380432, -0.03974097967147827, 0.007521598134189844, 0.017962025478482246, 0.01728844828903675, 0.026718512177467346, -0.000198213747353293, ...
[ "False", "ModelEncoder", "ModelImageEncoder", "ModelOutput", "ModelPatchEncoder", "Module", "True", "__init__", "attentions", "batch_size", "class", "config", "def", "features", "forward", "height", "hidden_states", "if", "image_encoder", "image_encodings", "image_features", ...
depth_pro/modeling_depth_pro.py:DepthProFeatureUpsampleBlock
[ -0.00033603809424676, 0.007721726316958666, 0.022764794528484344, 0.03294603154063225, -0.0011725585209205747, 0.032259657979011536, 0.001329852850176394, -0.04072495922446251, -0.008236507885158062, 0.0039180610328912735, 0.014585483819246292, -0.034776367247104645, 0.0011296599404886365, ...
[ "Conv2d", "ConvTranspose2d", "ModelFeatureUpsampleBlock", "Module", "ModuleList", "True", "__init__", "append", "class", "config", "def", "else", "features", "for", "forward", "i", "if", "in", "in_channels", "input_dims", "intermediate_dims", "kernel_size", "layer", "la...
depth_pro/modeling_depth_pro.py:DepthProFeatureUpsample
[ -0.0004026050155516714, 0.021144887432456017, 0.01016803365200758, 0.04090322554111481, -0.0014298796886578202, 0.02576672099530697, 0.018949516117572784, -0.036281391978263855, -0.006932749878615141, 0.00895480252802372, 0.010399125516414642, -0.0020509385503828526, 0.0019209495512768626, ...
[ "False", "ModelFeatureUpsample", "ModelFeatureUpsampleBlock", "Module", "ModuleList", "__init__", "append", "block", "class", "config", "def", "else", "enumerate", "feature_dims", "features", "for", "forward", "fusion_hidden_size", "hidden_size", "i", "if", "image_block", ...
depth_pro/modeling_depth_pro.py:DepthProFeatureProjection
[ -0.00021396145166363567, 0.021049562841653824, 0.014825229533016682, 0.03259287029504776, -0.00040493530104868114, 0.013976456597447395, 0.015730587765574455, -0.03893037512898445, 0.0016763260355219245, 0.010524781420826912, 0.008657481521368027, -0.01584375649690628, 0.0014075480867177248,...
[ "Conv2d", "ModelFeatureProjection", "Module", "ModuleList", "__init__", "and", "append", "class", "combined_feature_dims", "config", "def", "else", "enumerate", "features", "for", "forward", "fusion_hidden_size", "i", "if", "in", "in_channels", "intermediate_feature_dims", ...
depth_pro/modeling_depth_pro.py:DepthProNeck
[ -0.0003132035199087113, 0.019930481910705566, 0.03573741763830185, 0.015577848069369793, -0.0010881584603339434, 0.03734101727604866, 0.02611580491065979, -0.04856623336672783, 0.006786672864109278, 0.02565763145685196, -0.00463899178430438, 0.0025199460797011852, 0.005297613795846701, 0.0...
[ "Conv2d", "ModelFeatureProjection", "ModelFeatureUpsample", "ModelNeck", "Module", "__init__", "cat", "class", "config", "def", "dim", "feature_projection", "feature_upsample", "features", "forward", "fuse_image_with_low_res", "global_features", "in_channels", "kernel_size", "n...
depth_pro/modeling_depth_pro.py:DepthProPreTrainedModel
[ -0.0002514503139536828, 0.05258499085903168, -0.006374796852469444, 0.026859143748879433, -0.0008180989534594119, 0.02901240810751915, 0.016319479793310165, -0.0062614670023322105, 0, 0.02606583572924137, 0.006856448017060757, -0.0014237046707421541, -0.001827441737987101, -0.0118429558351...
[ "Conv2d", "ConvTranspose2d", "LayerNorm", "Linear", "Model", "ModelConfig", "ModelPreActResidualLayer", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "_init_weights", "_keys_to_ignore_on_load_unexpected", "_no_split_modules", "_supports_sdpa", "base_model_prefix", "bias"...
depth_pro/modeling_depth_pro.py:DepthProModel
[ 0.00003321502299513668, 0.03938252851366997, 0.00956592708826065, 0.032222069799900055, 0.00037935053114779294, 0.037592414766550064, 0.021481379866600037, -0.01667044498026371, 0.008447105064988136, 0.016446681693196297, 0.009677808731794357, 0.014097155071794987, 0.0026572018396109343, 0...
[ "ModelEncoder", "ModelModel", "ModelNeck", "ModelOutput", "ModelPreTrainedModel", "None", "__init__", "attentions", "auto_docstring", "class", "config", "def", "else", "encoder", "encodings", "features", "forward", "get_input_embeddings", "hidden_states", "if", "image_encoder...
depth_pro/modeling_depth_pro.py:DepthProPreActResidualLayer
[ -0.00015842537686694413, 0.02322392165660858, 0.021411322057247162, -0.010479086078703403, -0.00019382769824005663, 0.0362519733607769, -0.012518259696662426, -0.025149807333946228, 0.010705661028623581, -0.017899412661790848, 0.025149807333946228, -0.024923231452703476, 0.003271174151450395...
[ "BatchNorm2d", "Conv2d", "ModelPreActResidualLayer", "Module", "None", "ReLU", "__init__", "activation1", "activation2", "batch_norm1", "batch_norm2", "class", "config", "convolution1", "convolution2", "def", "else", "forward", "fusion_hidden_size", "hidden_state", "if", "i...
depth_pro/modeling_depth_pro.py:DepthProFeatureFusionLayer
[ -0.00028630869928747416, 0.02344530262053013, 0.024811048060655594, -0.008877347223460674, -0.0007326657068915665, 0.02594917081296444, 0.005263811908662319, -0.05144309252500534, 0.0020770716946572065, -0.008023756556212902, 0.014681767672300339, -0.021851932629942894, 0.0021482042502611876...
[ "Conv2d", "ConvTranspose2d", "ModelFeatureFusionLayer", "ModelPreActResidualLayer", "Module", "None", "True", "__init__", "class", "config", "deconv", "def", "forward", "fusion_hidden_size", "hidden_state", "if", "in_channels", "is", "kernel_size", "nn", "not", "out_channel...
depth_pro/modeling_depth_pro.py:DepthProFeatureFusionStage
[ -0.00021482835290953517, 0.010467357002198696, 0.001563030993565917, 0.023197924718260765, -0.00048446888104081154, 0.019010983407497406, 0.0243295319378376, -0.05771191045641899, 0.002489533508196473, 0.00814756378531456, 0.03507978841662407, -0.010750258341431618, -0.000031826421036385, ...
[ "False", "ModelFeatureFusionLayer", "ModelFeatureFusionStage", "Module", "ModuleList", "None", "ValueError", "_", "__init__", "append", "class", "config", "def", "does", "else", "f", "final", "for", "forward", "fused_hidden_state", "fused_hidden_states", "hidden_state", "...
depth_pro/modeling_depth_pro.py:DepthProFovEncoder
[ -0.0002578712592367083, 0.03414571285247803, 0.010983537882566452, 0.022649990394711494, -0.0009034386603161693, 0.03300752118229866, 0.049625102430582047, -0.055316053330898285, 0.0025893831625580788, 0.02390199899673462, 0.00984534714370966, 0.009788437746465206, 0.0010883945506066084, 0...
[ "AutoModel", "F", "False", "Linear", "ModelFovEncoder", "Module", "__init__", "align_corners", "base_height", "base_width", "batch_size", "bilinear", "class", "config", "def", "encodings", "exponent_value", "features", "forward", "fov_model_config", "from_config", "fusion_h...
depth_pro/modeling_depth_pro.py:DepthProFovHead
[ -0.00028598582139238715, 0.03248798847198486, 0.014642473310232162, 0.010753066278994083, -0.0008829811704345047, 0.031115256249904633, 0.05696837231516838, -0.047359250485897064, 0.005977103486657143, 0.013784516602754593, 0.014470881782472134, -0.01498565636575222, 0.004346984438598156, ...
[ "Conv2d", "F", "False", "ModelFovHead", "Module", "ModuleList", "ReLU", "True", "__init__", "align_corners", "append", "bilinear", "ceil", "class", "config", "def", "features", "final_in_channels", "final_kernel_size", "for", "forward", "fusion_hidden_size", "i", "image...
depth_pro/modeling_depth_pro.py:DepthProFovModel
[ -0.00012688309652730823, 0.038842201232910156, -0.006388519890606403, 0.01084628701210022, -0.00029635633109137416, 0.04429373890161514, 0.05860402062535286, -0.053606778383255005, 0.01232274528592825, 0.01203881110996008, 0.02816627360880375, 0.012152384035289288, 0.0056218975223600864, -...
[ "Conv2d", "ModelFovEncoder", "ModelFovHead", "ModelFovModel", "Module", "ReLU", "True", "__init__", "activation", "class", "config", "conv", "def", "flatten", "forward", "fov_encoder", "fov_features", "fov_output", "fusion_hidden_size", "global_features", "head", "inplace",...
depth_pro/modeling_depth_pro.py:DepthProDepthEstimationHead
[ -0.00022063942742533982, 0.02551973983645439, 0.027107635512948036, 0.015538685955107212, -0.0008896465296857059, 0.03924369066953659, 0.03992421552538872, -0.04332684725522995, 0.00771263288334012, 0.01264644879847765, 0.009697501547634602, -0.029262635856866837, 0.0019139805808663368, 0....
[ "Conv2d", "ConvTranspose2d", "ModelModelEstimationHead", "Module", "ModuleList", "ReLU", "True", "__init__", "class", "config", "def", "dim", "features", "for", "forward", "fusion_hidden_size", "hidden_states", "in", "in_channels", "kernel_size", "layer", "layers", "nn", ...
depth_pro/modeling_depth_pro.py:DepthProForDepthEstimation
[ -0.00019587513816077262, 0.04398999363183975, -0.024274270981550217, 0.0382918082177639, -0.0006909050280228257, 0.05926113203167915, 0.04353414103388786, -0.016638701781630516, 0.006752350367605686, 0.012934882193803787, 0.024388235062360764, 0.015043210238218307, 0.0060400767251849174, -...
[ "Model", "ModelFeatureFusionStage", "ModelForModelEstimation", "ModelFovModel", "ModelModel", "ModelModelEstimationHead", "ModelModelEstimatorOutput", "ModelPreTrainedModel", "Model_outputs", "None", "True", "__init__", "attentions", "auto_docstring", "class", "config", "def", "det...
mllama/modeling_mllama.py:_prepare_cross_attention_mask
[ -0.0000703664481989108, 0.033550720661878586, 0.02972278743982315, -0.01733829267323017, -0.0007036644383333623, 0.030398305505514145, 0.012271908111870289, -0.056743502616882324, 0.01812639646232128, 0.033550720661878586, 0.019027087837457657, 0.021503986790776253, 0.0032650032080709934, ...
[ "None", "_", "_prepare_cross_attention_mask", "any", "batch_size", "bool", "cross_attention_mask", "def", "dim", "dtype", "finfo", "full_text_row_masked_out_mask", "inverted_cross_attn_mask", "masked_fill", "min", "negative_inf_value", "num_vision_tokens", "repeat_interleave", "r...
mllama/modeling_mllama.py:_prepare_aspect_ratio_attention_mask
[ -0.00009095249697566032, 0.02384086884558201, 0.05102391913533211, -0.024397898465394974, -0.0006788799073547125, 0.030970849096775055, -0.01125199906527996, -0.04679049178957939, 0.009135286323726177, 0.028074294328689575, 0.009079583920538425, 0.009915128350257874, -0.0013438340974971652, ...
[ "_prepare_aspect_ratio_attention_mask", "aspect_ratio_mask", "attention_mask", "batch_size", "def", "dtype", "finfo", "max_num_tiles", "min", "num_patches", "pad_patches", "repeat", "reshape", "return", "shape", "target_length", "torch", "transpose", "unsqueeze", "view" ]
mllama/modeling_mllama.py:MllamaPrecomputedAspectRatioEmbedding
[ -0.0003803886938840151, -0.0005496525554917753, 0.04170079156756401, 0.0018346018623560667, -0.0016817183932289481, 0.030285492539405823, 0.01129881851375103, -0.03121735341846943, 0.002970307832583785, 0.006319184321910143, 0.022131705656647682, -0.0026791011914610863, -0.001215787720866501...
[ "Embedding", "ModelPrecomputedAspectRatioEmbedding", "Module", "Parameter", "True", "__init__", "aspect_ratio_ids", "class", "config", "def", "embedding", "embeddings", "forward", "gate", "hidden_size", "hidden_state", "if", "is_gated", "max_aspect_ratio_id", "max_num_tiles", ...
mllama/modeling_mllama.py:MllamaPrecomputedPositionEmbedding
[ -0.00032226037001237273, 0.012328709475696087, 0.01371136773377657, 0.003802312072366476, -0.0016419074963778257, 0.01947244629263878, 0.023620424792170525, -0.012155876494944096, -0.00024844653671607375, -0.006221965420991182, 0.0022468208335340023, 0.007835067808628082, -0.0035142581909894...
[ "Embedding", "ModelPrecomputedPositionEmbedding", "Module", "Parameter", "__init__", "aspect_ratio_ids", "batch_size", "class", "config", "def", "embedding", "forward", "gate", "gated_position_embedding", "gated_tile_position_embedding", "hidden_size", "hidden_state", "image_size",...
mllama/modeling_mllama.py:MllamaVisionMLP
[ -0.0001662031572777778, 0.03708730265498161, 0.032536715269088745, 0.034129418432712555, -0.00047283468302339315, 0.04709859937429428, 0.03162659704685211, -0.03481200709939003, 0.003341838950291276, 0, 0.049373894929885864, -0.020136358216404915, 0.0019482210045680404, -0.0018913387320935...
[ "ACT2FN", "Linear", "ModelVisionMLP", "Module", "__init__", "activation_fn", "class", "config", "def", "fc1", "fc2", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
mllama/modeling_mllama.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
mllama/modeling_mllama.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
mllama/modeling_mllama.py:MllamaVisionAttention
[ -0.00008038675878196955, 0.03103719651699066, 0.03441080451011658, 0.01743030920624733, -0.0002934336371254176, 0.022715630009770393, 0.04588107392191887, -0.018779752776026726, 0.004132670350372791, 0.035985156893730164, 0.027888495475053787, 0.018554845824837685, -0.0011456211796030402, ...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelVisionAttention", "Module", "None", "_", "__init__", "_attn_implementation", "attention_heads", "attention_interface", "attention_mask", "attn_output", "attn_weights", "batch_size", "class", "config", "contiguous", "def", "dropout", "e...