Dataset schema (three columns per row):
- identifier — string, lengths 24 to 102 (a "path:object" reference into a modeling file)
- embedding — list of floats, fixed length 2,560 (2.56k)
- tokens — list of strings, lengths 4 to 448

Each row below gives the identifier, followed by a truncated preview of its embedding and of its token list.
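Given that schema, a row is just a record with an "identifier" string, an "embedding" float list, and a "tokens" string list. The sketch below shows one way to load and inspect such rows; it is a minimal example under stated assumptions, not the dataset's actual loader — the JSON-lines layout and the file name "code_embeddings.jsonl" are hypothetical.

```python
# Minimal sketch of reading rows matching the schema above.
# Assumptions (not from the source): rows are stored as JSON-lines records
# with keys "identifier", "embedding", and "tokens"; the file path is made up.
import json

with open("code_embeddings.jsonl") as f:          # hypothetical path
    rows = [json.loads(line) for line in f]

row = rows[0]
print(row["identifier"])      # e.g. "beit/modeling_beit.py:BeitDropPath"
print(len(row["embedding"]))  # fixed at 2,560 per the schema above
print(row["tokens"][:5])      # first few of the 4-448 token strings
```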
xlstm/modeling_xlstm.py:xLSTMForCausalLM
[ -0.00038795583532191813, 0.037243761122226715, 0.01335153728723526, 0.00813975278288126, -0.0020056585781276226, 0.012121790088713169, 0.02693731151521206, 0.014171368442475796, -0.0020642178133130074, 0.02084713615477085, 0.012707384303212166, -0.025531886145472527, 0.0006258533103391528, ...
[ "CrossEntropyLoss", "GenerationMixin", "Linear", "ModelCausalLMOutput", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "Model_outputs", "None", "__init__", "and", "auto_docstring", "backbone", "cache_params", "can_return_tuple", "class", "config", "contiguous", "def", ...
beit/modeling_beit.py:BeitModelOutputWithPooling
[ -0.00009576975571690127, 0.016721311956644058, 0.037058040499687195, 0.02666371315717697, -0.00040955914300866425, 0.05038989707827568, 0.03389455005526543, 0.012540983036160469, 0.016721311956644058, -0.0041238367557525635, 0.021579530090093613, 0.010846256278455257, 0.0005119489505887032, ...
[ "BaseModelOutputWithPooling", "ModelModelOutputWithPooling", "class", "r" ]
beit/modeling_beit.py:drop_path
[ 0, 0.017753854393959045, 0.03775503486394882, -0.02123720571398735, 0, 0.043598074465990067, 0.0339345820248127, -0.027304979041218758, 0.014832334592938423, 0.0017486985307186842, 0.020113544538617134, -0.04944111406803131, 0.0016574009787291288, -0.018315685912966728, -0.05034004524350...
[ "False", "Model_path", "Model_prob", "def", "device", "div", "dtype", "floor_", "if", "input", "keep_prob", "ndim", "not", "or", "output", "rand", "random_tensor", "return", "shape", "torch", "training" ]
beit/modeling_beit.py:BeitDropPath
[ -0.00011107319733127952, 0.018094833940267563, 0.03596488758921623, -0.025063030421733856, -0.00047414645086973906, 0.031918834894895554, 0.033717080950737, -0.026861274614930153, 0.0066872211173176765, -0.002430439693853259, 0.009721758775413036, -0.033717080950737, -0.000818341679405421, ...
[ "ModelDropPath", "Module", "None", "__init__", "class", "def", "drop_path", "drop_prob", "extra_repr", "f", "forward", "hidden_states", "nn", "p", "return", "self", "super", "training" ]
beit/modeling_beit.py:BeitEmbeddings
[ -0.0002295338490512222, 0.026077890768647194, 0.018106482923030853, 0.006405595690011978, -0.0012882364680990577, 0.02004239708185196, 0.016967711970210075, -0.03256889432668686, 0.002192137064412236, -0.002049790695309639, 0.021978311240673065, 0.031202368438243866, -0.00170104147400707, ...
[ "Dropout", "False", "Iterable", "ModelEmbeddings", "ModelPatchEmbeddings", "Module", "None", "Parameter", "_", "__init__", "abc", "align_corners", "and", "batch_size", "bicubic", "bool_masked_pos", "cat", "class", "class_pos_embed", "cls_token", "cls_tokens", "collections",...
beit/modeling_beit.py:BeitPatchEmbeddings
[ -0.00008051706390688196, 0.008278737775981426, 0.01847228594124317, 0.010137230157852173, -0.00029566921875812113, -0.0061949738301336765, 0.016557475551962852, -0.01284049078822136, 0.007208697032183409, -0.0013023524079471827, 0.010249866172671318, 0.006392086856067181, -0.0036043485160917...
[ "Conv2d", "Iterable", "Make", "ModelPatchEmbeddings", "Module", "ValueError", "__init__", "abc", "batch_size", "channel", "class", "collections", "config", "configuration", "def", "dimension", "dtype", "else", "embeddings", "flatten", "forward", "height", "hidden_size", ...
beit/modeling_beit.py:BeitSelfAttention
[ -0.0000467752106487751, 0.04879309982061386, 0.04051170498132706, -0.006994423922151327, -0.00030250882264226675, 0.008505219593644142, 0.02193451300263405, -0.01001601479947567, 0.004252609796822071, 0.023277442902326584, 0.009064773097634315, 0.02193451300263405, 0.001203040941618383, -0...
[ "Dropout", "False", "Linear", "ModelRelativePositionBias", "ModelSelfAttention", "Module", "None", "The", "ValueError", "_", "__init__", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_probs", "attention_probs_dropout_prob", "attention_scores", "batc...
beit/modeling_beit.py:BeitSdpaSelfAttention
[ -0.00009699069778434932, 0.06651192158460617, 0.03168304264545441, -0.0028087804093956947, -0.0005547341424971819, 0.013369794934988022, 0.0377500094473362, -0.019099706783890724, 0.009100448340177536, 0.02820015512406826, 0.007696058135479689, 0.036851197481155396, 0.0014465219574049115, ...
[ "False", "If", "ModelSdpaSelfAttention", "ModelSelfAttention", "None", "The", "True", "_", "__class__", "__name__", "all_head_size", "attention", "attention_head_size", "attention_probs_dropout_prob", "attn_bias", "attn_implementation", "attn_mask", "batch_size", "be", "class",...
beit/modeling_beit.py:BeitSelfOutput
[ -0.00004644762884709053, 0.05025457963347435, 0.04352405667304993, 0.006029427982866764, -0.0002962131693493575, 0.03522307798266411, 0.023220308125019073, -0.010544488206505775, 0.00046622901572845876, 0.012844083830714226, 0.011105365119874477, 0, 0.0031128674745559692, -0.00098854571115...
[ "Dropout", "Linear", "ModelSelfOutput", "Module", "None", "__init__", "class", "config", "def", "dense", "dropout", "forward", "gamma", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "nn", "return", "self", "super" ]
beit/modeling_beit.py:BeitAttention
[ 0.00008665907080285251, 0.044369444251060486, 0.0493244044482708, 0.002336715580895543, 0.0003695107589010149, 0.034909967333078384, 0.045045118778944016, -0.026914458721876144, 0.010191458277404308, 0.008614879101514816, 0.013569842092692852, 0.0328829362988472, 0.002970162546262145, 0.00...
[ "False", "ModelAttention", "ModelSelfOutput", "Model_SELF_ATTENTION_CLASSES", "Module", "None", "__init__", "_attn_implementation", "attention", "attention_output", "class", "config", "def", "forward", "hidden_states", "interpolate_pos_encoding", "nn", "output", "output_attention...
beit/modeling_beit.py:BeitIntermediate
[ -0.0002450938045512885, 0.02289927378296852, 0.038928765803575516, 0.012709097005426884, -0.0011449636658653617, 0.035035889595746994, 0.03389092534780502, -0.01969337649643421, -0.0022326791658997536, -0.0030055297538638115, 0.025418194010853767, -0.021868806332349777, -0.001474140794016420...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
beit/modeling_beit.py:BeitOutput
[ -0.00020534885697998106, 0.039880163967609406, 0.050983164459466934, 0.02016667276620865, -0.000977177289314568, 0.0276442039757967, 0.0428258553147316, -0.01948689855635166, -0.0018268966814503074, 0.013935398310422897, 0.01132959220558405, -0.0024783480912446976, 0.0008851243765093386, 0...
[ "Dropout", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
beit/modeling_beit.py:BeitLayer
[ -0.00005970970232738182, 0.01910710521042347, 0.029896998777985573, 0.011689052917063236, 0, 0.04967847466468811, 0.020568236708641052, -0.004692480433732271, 0.0014400575309991837, 0.014836105518043041, 0.00021688672131858766, -0.0014189835637807846, 0.002163598546758294, 0.00781143410131...
[ "False", "GradientCheckpointingLayer", "Identity", "LayerNorm", "ModelAttention", "ModelDropPath", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "Parameter", "True", "__init__", "attention", "attention_output", "chunk_size_feed_forward", "class", "config", "def", "d...
beit/modeling_beit.py:BeitRelativePositionBias
[ -0.0002697355521377176, 0.021150125190615654, 0.03955645114183426, -0.02778097428381443, -0.0010432156268507242, 0.027895299717783928, 0.030867749825119972, -0.01874930039048195, 0.005659087561070919, 0.03292560204863548, 0.013204537332057953, 0.03429749980568886, 0.0016362766036763787, 0....
[ "False", "ModelRelativePositionBias", "Module", "None", "Parameter", "__init__", "align_corners", "arange", "bilinear", "cat", "class", "compile_compatible_method_lru_cache", "config", "contiguous", "coords", "coords_flatten", "def", "dim_size", "dtype", "flatten", "forward",...
beit/modeling_beit.py:BeitEncoder
[ -0.00022049856488592923, 0.03459693491458893, 0.029589485377073288, 0.010527028702199459, -0.0008357606711797416, 0.04119766876101494, 0.01616041176021099, -0.03118276409804821, 0.0019631485920399427, 0.012461725622415543, 0.005348868202418089, 0.005604931153357029, -0.00017337588360533118, ...
[ "BaseModelOutput", "False", "ModelEncoder", "ModelLayer", "ModelRelativePositionBias", "Module", "ModuleList", "None", "True", "__init__", "all_hidden_states", "all_self_attentions", "attentions", "class", "config", "cpu", "def", "device", "dim_size", "dpr", "drop_path_rate",...
beit/modeling_beit.py:BeitPreTrainedModel
[ -0.00023256278655026108, 0.04181159287691116, 0.0002591921074781567, 0.012668457813560963, -0.0011503868736326694, 0.023291783407330513, 0.007669245824217796, -0.004800379741936922, -0.0014699387829750776, 0.011361845768988132, -0.0010012626880779862, -0.005084426142275333, -0.00195991853252...
[ "Model", "ModelConfig", "ModelEmbeddings", "ModelLayer", "ModelPreTrainedModel", "ModelRelativePositionBias", "None", "PreTrainedModel", "True", "_init_weights", "_keys_to_ignore_on_load_unexpected", "_no_split_modules", "_supports_sdpa", "base_model_prefix", "class", "cls_token", "c...
beit/modeling_beit.py:BeitModel
[ 0, 0.03226597234606743, 0.002898896113038063, 0.03361038863658905, 0.00009321631659986451, 0.03809177502989769, 0.017701471224427223, -0.011427531950175762, 0.009018788114190102, 0.014900606125593185, 0.02431151457130909, 0.004313333425670862, -0.0006301947869360447, 0.0015544805210083723,...
[ "False", "Identity", "LayerNorm", "ModelEmbeddings", "ModelEncoder", "ModelModel", "ModelModelOutputWithPooling", "ModelPooler", "ModelPreTrainedModel", "None", "True", "_", "__init__", "add_pooling_layer", "attentions", "auto_docstring", "bool_masked_pos", "class", "config", "...
beit/modeling_beit.py:BeitPooler
[ -0.00010185079008806497, 0.02180309221148491, 0.01775715872645378, 0.037537284195423126, -0.0003863305610138923, 0.02899586595594883, 0.018768642097711563, -0.0029642090667039156, 0.003877354087308049, 0.006265579257160425, 0.015846578404307365, 0.004945031367242336, -0.0010395804420113564, ...
[ "LayerNorm", "ModelPooler", "Module", "None", "__init__", "class", "config", "def", "else", "eps", "forward", "hidden_size", "hidden_states", "if", "is", "layer_norm_eps", "layernorm", "mean", "nn", "not", "patch_tokens", "pooled_output", "return", "self", "super", ...
beit/modeling_beit.py:BeitForMaskedImageModeling
[ -0.0000917001671041362, 0.04940979555249214, 0.010116711258888245, 0.016320882365107536, -0.0004890675190836191, 0.04449117183685303, 0.004219954367727041, -0.024034176021814346, 0.0055334498174488544, 0.01755053736269474, 0.023028094321489334, 0.03666609153151512, 0.0001292535598622635, -...
[ "CrossEntropyLoss", "False", "LayerNorm", "Linear", "MaskedLMOutput", "Model", "ModelForMaskedImageModeling", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "add_pooling_layer", "attentions", "auto_docstring", "bool_masked_pos", "class", "config", "def", "else", "eps...
beit/modeling_beit.py:BeitForImageClassification
[ -0.00009717886860016733, 0.03235228359699249, 0.012159996666014194, 0.025993386283516884, -0.00009238529310096055, 0.005717429332435131, 0.025547148659825325, -0.0049365125596523285, 0.002816879888996482, 0.007976511493325233, 0.046408794820308685, 0.0017082564299926162, 0.000578715407755225...
[ "False", "Identity", "ImageClassifierOutput", "Linear", "Model", "ModelForImageClassification", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "add_pooling_layer", "attentions", "auto_docstring", "class", "classifier", "config", "def", "else", "forward", "h...
beit/modeling_beit.py:BeitConvModule
[ 0.00006325140566332266, -0.0026512136682868004, -0.005587504245340824, -0.018587004393339157, 0.0008053418132476509, 0.03763012960553169, 0.002166583202779293, -0.02736736834049225, 0.019157156348228455, -0.002907782793045044, 0.03991074487566948, -0.03603370115160942, 0.003591967048123479, ...
[ "BatchNorm2d", "Conv2d", "ModelConvModule", "Module", "ReLU", "__init__", "activation", "bn", "class", "conv", "def", "dilation", "forward", "in_channels", "input", "int", "kernel_size", "nn", "out_channels", "output", "padding", "return", "self", "str", "super" ]
beit/modeling_beit.py:BeitPyramidPoolingBlock
[ -0.00011145989992655814, -0.02651165798306465, 0.016176605597138405, 0.02662399597465992, -0.00028610965819098055, 0.02797204628586769, 0.019996080547571182, -0.030555808916687965, 0.009155509062111378, -0.0007091307197697461, 0.011346091516315937, -0.04583371430635452, -0.001116354251280427...
[ "AdaptiveAvgPool2d", "ModelConvModule", "ModelPyramidPoolingBlock", "Module", "__init__", "add_module", "channels", "class", "def", "enumerate", "for", "forward", "hidden_state", "i", "in", "in_channels", "input", "kernel_size", "layer", "layers", "nn", "pool_scale", "ret...
beit/modeling_beit.py:BeitPyramidPoolingModule
[ -0.0001824860810302198, -0.02573939599096775, 0.0063214595429599285, 0.04671643674373627, -0.0005882075638510287, 0.022791270166635513, 0.031068697571754456, -0.040593408048152924, 0.004393839277327061, 0.00024449589545838535, 0.013890202157199383, -0.036511387676000595, -0.00221109343692660...
[ "ModelPyramidPoolingBlock", "ModelPyramidPoolingModule", "Module", "__init__", "add_module", "align_corners", "append", "bilinear", "block", "blocks", "channels", "class", "def", "enumerate", "for", "forward", "functional", "i", "in", "in_channels", "interpolate", "mode", ...
beit/modeling_beit.py:BeitUperHead
[ -0.00043113011633977294, 0.015093239024281502, 0.02405484952032566, 0.04457222297787666, -0.0017318902537226677, 0.023701101541519165, 0.045279718935489655, -0.02653108537197113, -0.004569242242723703, -0.007959325797855854, 0.021696532145142555, 0.0035374779254198074, 0.0005527309258468449,...
[ "Conv2d", "False", "ModelConvModule", "ModelPyramidPoolingModule", "ModelUperHead", "Module", "ModuleList", "__init__", "align_corners", "append", "bilinear", "bottleneck", "cat", "channels", "class", "classifier", "config", "def", "dim", "encoder_hidden_states", "enumerate",...
beit/modeling_beit.py:BeitFCNHead
[ -0.0003740352694876492, 0.020103488117456436, 0.013828410767018795, 0.0018374937353655696, -0.001300044939853251, 0.03067815490067005, 0.0392773374915123, -0.03439671918749809, 0.004241487476974726, -0.007233769632875919, 0.02266000211238861, 0.00952882133424282, 0.002425782149657607, 0.01...
[ "Conv2d", "Identity", "ModelConvModule", "ModelFCNHead", "Module", "Sequential", "__init__", "append", "auxiliary_channels", "auxiliary_concat_input", "auxiliary_num_convs", "cat", "channels", "class", "classifier", "concat_input", "config", "conv_cat", "conv_padding", "convs",...
beit/modeling_beit.py:BeitForSemanticSegmentation
[ -0.00032050375011749566, 0.05036894604563713, -0.013845762237906456, 0.005982737056910992, -0.0007585255661979318, 0.023361163213849068, 0.02962879277765751, -0.02871713787317276, -0.007350219413638115, -0.0011823027161881328, 0.032819584012031555, 0.01407367642968893, -0.004843167960643768,...
[ "BatchNorm2d", "ConvTranspose2d", "CrossEntropyLoss", "False", "GELU", "Identity", "MaxPool2d", "Model", "ModelFCNHead", "ModelForSemanticSegmentation", "ModelModel", "ModelPreTrainedModel", "ModelUperHead", "None", "One", "SemanticSegmenterOutput", "Sequential", "The", "True", ...
beit/modeling_beit.py:BeitBackbone
[ -0.00028228244627825916, 0.00676057580858469, 0.027837665751576424, 0.029769258573651314, -0.0008486227015964687, 0.02170201577246189, 0.018179699778556824, -0.025224333629012108, -0.00211623078212142, 0.0038631861098110676, 0.0036217370070517063, 0.004402895923703909, -0.0022866653744131327...
[ "BackboneMixin", "BackboneOutput", "BatchNorm2d", "ConvTranspose2d", "GELU", "Identity", "MaxPool2d", "ModelBackbone", "ModelEmbeddings", "ModelEncoder", "ModelPreTrainedModel", "None", "One", "Sequential", "True", "ValueError", "_", "__init__", "_init_backbone", "a", "add_fp...
glmasr/modeling_glmasr.py:GlmAsrRotaryEmbedding
[ -0.00030098692514002323, 0.04780977591872215, 0.004612714983522892, -0.006150286644697189, -0.0016971309669315815, 0.03643754869699478, 0.03875841200351715, 0.012590681202709675, -0.004351617768406868, 0.02146798186004162, 0.002509433077648282, -0.0033362405374646187, -0.0013707596808671951,...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
glmasr/modeling_glmasr.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
glmasr/modeling_glmasr.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
glmasr/modeling_glmasr.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
glmasr/modeling_glmasr.py:apply_rotary_pos_emb
[ -0.00017082532576750964, 0.02744593471288681, 0.013950735330581665, -0.01417850237339735, -0.0008220968884415925, 0.02699040062725544, 0.049197692424058914, 0, 0.01417850237339735, 0.030065257102251053, 0.00009742382098920643, 0.011900831013917923, -0.001053422805853188, -0.015488162636756...
[ "Model_rotary_pos_emb", "None", "cat", "cos", "def", "dim", "k", "k_embed", "k_pass", "k_rot", "position_ids", "q", "q_embed", "q_pass", "q_rot", "return", "rotary_dim", "rotate_half", "shape", "sin", "torch", "unsqueeze", "unsqueeze_dim" ]
glmasr/modeling_glmasr.py:GlmAsrAttention
[ -0.00012337722000665963, 0.03316379711031914, 0.028877319768071175, -0.004201875533908606, -0.0004829336830880493, 0.03338940069079399, 0.046474434435367584, -0.002213739790022373, 0.003158456878736615, 0.010885396040976048, 0.01235182210803032, 0.025944465771317482, -0.0015369276516139507, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelAttention", "Module", "None", "Tensor", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "class", "config", "contiguous", "c...
glmasr/modeling_glmasr.py:GlmAsrMLP
[ -0.00021045222820248455, 0.025796448811888695, 0.04086340218782425, 0.017463969066739082, -0.0008132730145007372, 0.04748373106122017, 0.032416775822639465, -0.024198438972234726, 0.0019119050120934844, -0.007476404774934053, 0.0356127992272377, -0.036069370806217194, 0.00019350904040038586,...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "fc1", "fc2", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
glmasr/modeling_glmasr.py:GlmAsrEncoderLayer
[ -0.00012072121171513572, 0.02402440272271633, 0.024362774565815926, 0.010997085832059383, -0.0005604284233413637, 0.04443950578570366, 0.028536027297377586, -0.007190402131527662, 0.006147088948637247, 0.005921507719904184, -0.0008988002664409578, 0.03045346774160862, -0.00046526131336577237...
[ "GradientCheckpointingLayer", "LayerNorm", "ModelAttention", "ModelEncoderLayer", "ModelMLP", "None", "Tensor", "_", "__init__", "class", "config", "def", "forward", "hidden_size", "hidden_states", "input_layernorm", "kwargs", "layer_idx", "mlp", "nn", "position_embeddings", ...
glmasr/modeling_glmasr.py:GlmAsrPreTrainedModel
[ -0.0003446676710154861, 0.034597378224134445, 0.01381573174148798, 0.016950393095612526, -0.001705197966657579, 0.02263922430574894, 0.03134661912918091, -0.03552616760134697, 0.001552818575873971, 0.00690786587074399, 0.01811137981712818, 0.008126901462674141, -0.0040634507313370705, 0.00...
[ "ModelAttention", "ModelConfig", "ModelPreTrainedModel", "PreTrainedModel", "True", "_no_split_modules", "_skip_keys_device_placement", "_supports_flash_attn", "_supports_sdpa", "audio", "base_model_prefix", "class", "config", "input_modalities", "model", "past_key_values", "supports...
glmasr/modeling_glmasr.py:GlmAsrEncoder
[ -0.000011097399692516774, 0.023627573624253273, 0.008100882172584534, 0.010013590566813946, -0.000094932212959975, 0.04477987810969353, 0.018114471808075905, -0.04005436226725578, 0.008719699457287788, -0.005738124717026949, 0.025090232491493225, 0.00753832096233964, -0.0013993711909279227, ...
[ "BaseModelOutputWithPooling", "Conv1d", "False", "LayerNorm", "ModelAttention", "ModelEncoder", "ModelEncoderConfig", "ModelEncoderLayer", "ModelPreTrainedModel", "ModelRotaryEmbedding", "ModuleList", "None", "__init__", "_can_record_outputs", "_no_split_modules", "arange", "attentio...
glmasr/modeling_glmasr.py:GlmAsrMultiModalProjector
[ -0.0003220324870198965, 0.036640141159296036, 0.03549513593316078, 0.015114057809114456, -0.0010376602876931429, 0.02679310366511345, 0.036182139068841934, -0.05083819478750229, -0.004007515497505665, -0.007184902671724558, 0.03870115056633949, -0.005696396809071302, -0.0051811449229717255, ...
[ "ACT2FN", "Linear", "ModelMultiModalProjector", "Module", "__init__", "act", "audio_config", "audio_features", "class", "config", "def", "forward", "hidden_size", "hidden_states", "intermediate_size", "linear_1", "linear_2", "nn", "projector_hidden_act", "return", "self", "...
glmasr/modeling_glmasr.py:GlmAsrForConditionalGeneration
[ -0.000293186487397179, 0.04366879165172577, -0.005259587895125151, 0.009040804579854012, -0.0010519175557419658, 0.015238589607179165, 0.0191050972789526, -0.04730786010622978, -0.01228184811770916, -0.019673701375722885, 0.04912739247083664, -0.0057997615076601505, -0.0036674963776022196, ...
[ "AutoModel", "AutoModelForCausalLM", "Compute", "GenerationMixin", "ModelForConditionalGeneration", "ModelMultiModalProjector", "ModelPreTrainedModel", "None", "True", "__init__", "_keep_in_fp32_modules_strict", "_pp_plan", "_tp_plan", "and", "arange", "args", "attention_mask", "au...
granite/modeling_granite.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
granite/modeling_granite.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
granite/modeling_granite.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
granite/modeling_granite.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
granite/modeling_granite.py:GraniteAttention
[ -0.0001020252238959074, 0.0346745029091835, 0.03309838846325874, -0.004897210747003555, -0.00039051033672876656, 0.03309838846325874, 0.04187959432601929, -0.008218307979404926, 0.00227973610162735, 0.010357319377362728, 0.01666177436709404, 0.02488008141517639, -0.0009006364271044731, -0....
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attention_multiplier", "attn_output", "attn_weights", "cache_kwargs", ...
granite/modeling_granite.py:GraniteRMSNorm
[ -0.00009884802420856431, 0.04179859161376953, 0.03230918198823929, 0.053095508366823196, -0.00046952810953371227, 0.03931327164173126, 0.022028988227248192, -0.02937198430299759, 0.007964326068758965, 0.042476408183574677, 0.019995542243123055, 0.006975846365094185, 0.0028524715453386307, ...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
granite/modeling_granite.py:GraniteMLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
granite/modeling_granite.py:GraniteDecoderLayer
[ -0.00013429038517642766, 0.045687004923820496, 0.01583212986588478, 0, -0.0006679180078208447, 0.03822328522801399, 0.04297292232513428, -0.036640070378780365, 0.006050135474652052, -0.004438650794327259, -0.007746435236185789, 0.023069674149155617, -0.0016962996451184154, -0.0017528429161...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "Tensor", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "if", "input_layernorm", "kw...
granite/modeling_granite.py:GranitePreTrainedModel
[ -0.00034975787275470793, 0.027630871161818504, 0.016904963180422783, 0.011250545270740986, -0.0018799485405907035, 0.029612833634018898, 0.03077869303524494, -0.03241089731454849, -0.00408050836995244, 0.003993069287389517, 0.006383080966770649, 0.004459412768483162, -0.004255387466400862, ...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supp...
granite/modeling_granite.py:GraniteRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
granite/modeling_granite.py:GraniteModel
[ -0.00012166404485469684, 0.04694468900561333, 0.0009380474220961332, -0.0021441085264086723, -0.0008040406391955912, 0.04536481946706772, 0.03972243145108223, -0.012300411239266396, 0.01111550908535719, 0.007052988279610872, 0.009140673093497753, -0.0026237117126584053, -0.000701772340107709...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "Setting", "True", "ValueError", "You", "__init__", "all_hidden_states", "all_self_attns", "an...
granite/modeling_granite.py:GraniteForCausalLM
[ -0.0002744296216405928, 0.03875300660729408, 0.010198159143328667, -0.0013172621838748455, -0.0010693903313949704, 0.026741839945316315, 0.03580687195062637, -0.007478650193661451, 0.002988627180457115, 0.029914600774645805, 0.02390901744365692, 0.006997070275247097, 0.0012039493303745985, ...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tuple", "class", "colwi...
olmoe/modeling_olmoe.py:OlmoeRMSNorm
[ -0.0000997350289253518, 0.04134855419397354, 0.03118087723851204, 0.05332382023334503, -0.00047307941713370383, 0.03931501880288124, 0.023159708827733994, -0.030954929068684578, 0.007512783631682396, 0.041800450533628464, 0.02169104479253292, 0.007202104665338993, 0.0027678676415234804, 0....
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
olmoe/modeling_olmoe.py:OlmoeRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
olmoe/modeling_olmoe.py:OlmoeMLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
olmoe/modeling_olmoe.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
olmoe/modeling_olmoe.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
olmoe/modeling_olmoe.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
olmoe/modeling_olmoe.py:eager_attention_forward
[ 0, 0.021594731137156487, 0.01775064319372177, -0.018768195062875748, -0.00008832923776935786, 0.039797618985176086, 0.05359111353754997, -0.035049039870500565, 0.02069023996591568, 0.011645326390862465, 0.029395969584584236, 0.022499222308397293, 0.002741739386692643, -0.014584923163056374...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "causal_mask", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", ...
olmoe/modeling_olmoe.py:OlmoeAttention
[ -0.00003597145405365154, 0.049637097865343094, 0.02560465782880783, 0.007243422791361809, -0.00010177290096180514, 0.027289174497127533, 0.026390764862298965, -0.014037641696631908, 0.0036217113956809044, 0.024369345977902412, 0.021112613379955292, 0.03593636304140091, 0.001838930998928845, ...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "ModelRMSNorm", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache...
olmoe/modeling_olmoe.py:OlmoeExperts
[ -0.00033012471976689994, 0.03738027438521385, -0.010854209773242474, 0.006558961234986782, -0.001262454898096621, 0.05874042958021164, 0.0631517693400383, -0.01555576641112566, -0.005252973176538944, -0.018806224688887596, 0.011492692865431309, -0.016020117327570915, -0.0018283829558640718, ...
[ "ACT2FN", "ModelExperts", "Module", "None", "Parameter", "__init__", "act_fn", "chunk", "class", "config", "continue", "current_hidden_states", "current_state", "def", "dim", "down_proj", "dtype", "empty", "expert_hit", "expert_idx", "expert_mask", "final_hidden_states", ...
olmoe/modeling_olmoe.py:OlmoeTopKRouter
[ -0.00036532338708639145, 0.038302045315504074, 0.0029519598465412855, -0.003818455385044217, -0.0016155003104358912, 0.05780554190278053, 0.07660409063100815, -0.025495532900094986, -0.0035687871277332306, 0.01621374860405922, 0.022440768778324127, -0.02114836871623993, -0.001696275430731475...
[ "F", "ModelTopKRouter", "Module", "Parameter", "True", "__init__", "class", "config", "def", "dim", "dtype", "float", "forward", "functional", "hidden_dim", "hidden_size", "hidden_states", "if", "keepdim", "linear", "nn", "norm_topk_prob", "num_experts", "num_experts_pe...
olmoe/modeling_olmoe.py:OlmoeSparseMoeBlock
[ -0.00038656569086015224, 0.02426896244287491, 0.008128924295306206, -0.008423450402915478, -0.0014579049311578274, 0.04453236609697342, 0.05042289197444916, -0.03510752692818642, -0.0020616836845874786, -0.012782438658177853, 0.01260572299361229, -0.029452623799443245, 0.0012296470813453197,...
[ "ModelExperts", "ModelSparseMoeBlock", "ModelTopKRouter", "Module", "_", "__init__", "batch_size", "class", "config", "def", "experts", "final_hidden_states", "forward", "gate", "hidden_dim", "hidden_states", "nn", "reshape", "return", "self", "sequence_length", "shape", ...
olmoe/modeling_olmoe.py:OlmoeDecoderLayer
[ -0.00021539926819968969, 0.0460653081536293, 0.014749974943697453, -0.00028188052237965167, -0.000776501081418246, 0.04198069870471954, 0.04243454337120056, -0.039484549313783646, 0.006353835575282574, -0.006609123665839434, 0.0010282434523105621, 0.015998050570487976, -0.0018579295137897134...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelRMSNorm", "ModelSparseMoeBlock", "None", "Tensor", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "input_layerno...
olmoe/modeling_olmoe.py:OlmoePreTrainedModel
[ -0.00040418931166641414, 0.03815839812159538, -0.006818179972469807, 0.0043308609165251255, -0.001624072901904583, 0.04564961418509483, 0.03815839812159538, -0.015567689202725887, -0.009773699566721916, -0.011061253026127815, 0.005179475527256727, -0.01129535399377346, -0.003174989251419902,...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelExperts", "ModelPreTrainedModel", "ModelTopKRouter", "OutputRecorder", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_supports_a...
olmoe/modeling_olmoe.py:OlmoeModel
[ -0.00018841371638700366, 0.053466834127902985, -0.00685399305075407, -0.007394349668174982, -0.0010451629059389234, 0.04595872759819031, 0.03822309896349907, -0.01086400542408228, 0.009157617576420307, 0.0006292306934483349, 0.020590418949723244, -0.007963145151734352, -0.001962346490472555,...
[ "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "MoeModelOutputWithPast", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "auto_docstring", "cache_po...
olmoe/modeling_olmoe.py:load_balancing_loss_func
[ -0.00027690583374351263, 0.01840798556804657, 0.001643570140004158, -0.028812499716877937, -0.0009432663209736347, 0.05808233842253685, 0.038874007761478424, -0.011547867208719254, 0, -0.02252405695617199, 0.031556546688079834, -0.0061455233953893185, -0.0008396499906666577, 0.014349081553...
[ "Model_balancing_loss_func", "None", "_", "attention_mask", "batch_size", "cat", "compute_device", "concatenated_gate_logits", "def", "device", "dim", "else", "expand", "expert_attention_mask", "expert_mask", "float", "for", "functional", "gate_logits", "if", "in", "is", ...
olmoe/modeling_olmoe.py:OlmoeForCausalLM
[ -0.00044288873323239386, 0.0379493422806263, -0.00161050446331501, -0.01030722912400961, -0.001698350184597075, 0.04966210201382637, 0.04029189422726631, -0.004597258288413286, -0.005153614562004805, 0.006207762751728296, 0.02857913449406624, -0.0060027893632650375, -0.00006588427640963346, ...
[ "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "MoeCausalLMOutputWithPast", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "aux_loss", "cache_position", "can_return_tuple", ...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerGatedAttention
[ 0.00009999789472203702, 0.013585679233074188, 0.03592906892299652, -0.004715690389275551, 0.0002842045505531132, 0.025262625887989998, 0.04042020067572594, -0.027620472013950348, 0.016280358657240868, -0.0064560044556856155, 0.027059080079197884, -0.019760986790060997, 0.0015227749245241284,...
[ "Linear", "ModelGatedAttention", "Module", "Softmax", "__init__", "attn_layer", "attn_softmax", "attn_weight", "class", "def", "dim", "forward", "in_size", "inputs", "nn", "out_size", "return", "self", "super" ]
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerBatchNorm
[ -0.00005249693276709877, 0.011583590880036354, 0.04318542778491974, 0.011977207846939564, 0.0000896181954885833, 0.029015207663178444, 0.00024073907115962356, -0.017206693068146706, 0.012258362956345081, 0.030139828100800514, 0.026203656569123268, -0.0014620065921917558, -0.00170098850503563...
[ "BatchNorm1d", "ModelBatchNorm", "Module", "__init__", "batchnorm", "class", "config", "d_model", "def", "eps", "forward", "inputs", "nn", "norm_eps", "output", "return", "self", "super", "transpose" ]
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerPositionalEncoding
[ -0.00006590608973056078, 0.05174067243933678, 0.010910533368587494, 0.029919607564806938, -0.0006573033751919866, 0.012091570533812046, 0.023170823231339455, 0.009392056614160538, 0.0013216367224231362, 0.019346512854099274, 0.008604698814451694, -0.0054271467961370945, 0.0017364057712256908...
[ "Available", "False", "ModelPositionalEncoding", "Model_input", "Module", "Parameter", "True", "ValueError", "__init__", "_init_pe", "a", "and", "arange", "are", "class", "config", "cos", "d_model", "def", "div_term", "elif", "else", "encoder", "exp", "f", "forward"...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerNormLayer
[ -0.00017537000530865043, 0.04330753535032272, 0.01859276369214058, 0.05895266681909561, -0.0003755398211069405, 0.041720349341630936, -0.007227370049804449, -0.022333990782499313, 0.0008821642841212451, 0.03491811826825142, 0.026528699323534966, -0.011110310442745686, 0.00043399649439379573,...
[ "LayerNorm", "ModelBatchNorm", "ModelNormLayer", "Module", "__init__", "batch", "class", "config", "d_model", "def", "else", "eps", "forward", "if", "in", "inputs", "inputs_reshaped", "lower", "nn", "norm", "norm_eps", "norm_mlp", "reshape", "return", "self", "shape...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerMLP
[ -0.00031441409373655915, 0.026403512805700302, 0.021634597331285477, 0.03163769096136093, -0.0012576563749462366, 0.061414338648319244, 0.03442925214767456, -0.0027334033511579037, -0.0021082099992781878, -0.02093670703470707, 0.03582502901554108, -0.050015464425086975, -0.000383476144634187...
[ "Dropout", "Linear", "ModelMLP", "Module", "__init__", "class", "config", "def", "dropout", "dropout1", "dropout2", "expansion_factor", "fc1", "fc2", "forward", "functional", "gelu", "in_features", "inputs", "nn", "num_hidden", "out_features", "return", "self", "super...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerChannelFeatureMixerBlock
[ -0.0001421989145455882, 0.008535468019545078, -0.004409049637615681, 0.0027980508748441935, -0.00028086494421586394, 0.03730734437704086, 0.012266202829778194, -0.03436797857284546, 0.0035187609028071165, -0.002939366502687335, 0.02283661626279354, -0.01317062322050333, -0.000277332059340551...
[ "ModelChannelFeatureMixerBlock", "ModelGatedAttention", "ModelMLP", "ModelNormLayer", "Module", "__init__", "class", "config", "def", "forward", "gated_attn", "gating_block", "if", "in_features", "in_size", "inputs", "mlp", "nn", "norm", "num_input_channels", "out", "out_fe...
patchtsmixer/modeling_patchtsmixer.py:eager_attention_forward
[ 0.00002095530362566933, 0.025862814858555794, 0.0269921962171793, -0.01146321278065443, 0.00008955634984886274, 0.03704368323087692, 0.060083046555519104, -0.023942869156599045, 0.021458230912685394, 0.014569009654223919, 0.023152301087975502, 0.026879258453845978, 0.003063444746658206, -0...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "shape", "size", "softmax", "tor...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerAttention
[ -0.00012168648390797898, 0.04424449801445007, 0.04559892416000366, -0.005925602745264769, -0.0006137231248430908, 0.02426675334572792, 0.03656943142414093, -0.019526271149516106, -0.001551943481899798, 0.028894366696476936, 0.01636595092713833, 0.0032872986048460007, -0.0034989272244274616, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelAttention", "Module", "None", "ValueError", "__init__", "_attn_implementation", "and", "attention_interface", "attention_mask", "attn_output", "attn_weights", "be", "bsz", "by", "class", "config", "contiguous", "current_stat...
patchtsmixer/modeling_patchtsmixer.py:PatchMixerBlock
[ -0.0001553682959638536, 0.01548033207654953, 0.007683668285608292, -0.00009180853521684185, -0.00018891372019425035, 0.04022626206278801, 0.012203473597764969, -0.02915274165570736, 0.0008898365776985884, 0.006129985209554434, 0.01920917071402073, -0.011469004675745964, 0.0002489423786755651...
[ "False", "ModelAttention", "ModelGatedAttention", "ModelMLP", "ModelMixerBlock", "ModelNormLayer", "Module", "_", "__init__", "batch_size", "class", "config", "d_model", "def", "dropout", "embed_dim", "forward", "gated_attn", "gating_block", "hidden_state", "hidden_state_resh...
patchtsmixer/modeling_patchtsmixer.py:FeatureMixerBlock
[ -0.00007624268619110808, 0.0066000609658658504, 0.004964148160070181, -0.005584666971117258, -0.00009254892938770354, 0.049641482532024384, 0.01370781846344471, -0.032718248665332794, 0.004146192222833633, -0.0023833552841097116, 0.024820741266012192, -0.017374519258737564, 0.002016685204580...
[ "ModelGatedAttention", "ModelMLP", "ModelMixerBlock", "ModelNormLayer", "Module", "__init__", "class", "config", "d_model", "def", "forward", "gated_attn", "gating_block", "hidden", "if", "in_Models", "in_size", "mlp", "nn", "norm", "out", "out_Models", "out_size", "res...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerLayer
[ -0.00011139930575154722, 0.016344435513019562, 0.011159304529428482, 0.019500603899359703, 0.00009554802090860903, 0.05500748008489609, 0.00935578066855669, -0.04531354084610939, 0.00402974896132946, 0.0036352279130369425, 0.022544050589203835, -0.03381607308983803, 0.0008806269615888596, ...
[ "FeatureMixerBlock", "ModelChannelFeatureMixerBlock", "ModelLayer", "ModelMixerBlock", "Model_mixer", "Module", "__init__", "channel_feature_mixer", "class", "config", "def", "feature_mixer", "forward", "hidden", "if", "mix_channel", "mode", "nn", "return", "self", "super" ]
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerBlock
[ -0.00002216262510046363, -0.0005680688773281872, 0.021113814786076546, 0.026533402502536774, 0.0002840344386640936, 0.06277690082788467, 0.018404019996523857, -0.025404321029782295, 0.008242291398346424, -0.002512205159291625, 0.01851692795753479, -0.039969466626644135, 0.000910321541596204,...
[ "False", "ModelBlock", "ModelLayer", "Module", "ModuleList", "None", "_", "__init__", "all_hidden_states", "class", "config", "def", "else", "embedding", "for", "forward", "hidden_state", "if", "in", "mixers", "mod", "nn", "num_layers", "output_hidden_states", "range"...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerForPredictionHead
[ -0.000492641469463706, 0.008474914357066154, 0.028328735381364822, 0.03105493262410164, -0.00171128090005368, 0.016475709155201912, 0.05286450311541557, -0.046226806938648224, -0.010549194179475307, 0.004207824822515249, 0.012208618223667145, -0.014757019467651844, -0.0028595428448170424, ...
[ "Dropout", "Flatten", "Linear", "ModelForPredictionHead", "Module", "None", "__init__", "base_forecast_block", "class", "config", "d_model", "def", "distribution_output", "dropout_layer", "else", "flatten", "for", "forecast", "forward", "get_parameter_projection", "head_dropo...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerLinearHead
[ -0.0004545707197394222, 0.010264500044286251, 0.022288627922534943, 0.04270032048225403, -0.0016936424653977156, 0.04058876633644104, 0.027450205758213997, -0.04035414755344391, -0.00956064835190773, 0.02475210838019848, 0.014135682955384254, -0.020529000088572502, -0.00009852086805040017, ...
[ "Dropout", "Flatten", "Linear", "ModelLinearHead", "Module", "None", "__init__", "and", "avg_pool", "class", "config", "d_model", "def", "dim", "distribution_output", "dropout", "elif", "else", "flatten", "forward", "get_parameter_projection", "head_aggregation", "head_dr...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerPreTrainedModel
[ -0.00024552049580961466, 0.04395172744989395, -0.004867710638791323, 0.014176140539348125, -0.0012525103520601988, 0.030515708029270172, 0.007145002484321594, 0.006888807285577059, -0.0021064947359263897, 0.011215660721063614, -0.0013877245364710689, 0.0006974205607548356, -0.001850299420766...
[ "BatchNorm1d", "False", "LayerNorm", "Linear", "ModelBatchNorm", "ModelConfig", "ModelPositionalEncoding", "ModelPreTrainedModel", "None", "PreTrainedModel", "_init_weights", "base_model_prefix", "batchnorm", "bias", "class", "config", "def", "elif", "getattr", "if", "init", ...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerPretrainHead
[ -0.0004220561822876334, 0.011985664255917072, 0.040225058794021606, 0.018124664202332497, -0.0016078330809250474, 0.04116052761673927, 0.05449092388153076, -0.023971328511834145, -0.005817432422190905, 0.0159029308706522, 0.013915064744651318, -0.019995596259832382, 0.00028137079789303243, ...
[ "Dropout", "Linear", "ModelPretrainHead", "Model_length", "Module", "__init__", "base_pt_block", "class", "config", "d_model", "def", "dropout_layer", "forecast", "forward", "head_dropout", "hidden_features", "nn", "return", "self", "super" ]
patchtsmixer/modeling_patchtsmixer.py:random_masking
[ 0.00004487325713853352, 0.016026537865400314, 0.024432066828012466, -0.017707644030451775, 0.000383502192562446, 0.03025989793241024, -0.00941419042646885, -0.05289878323674202, 0.012216033414006233, 0.0052674636244773865, 0.014569580554962158, 0.002675759606063366, -0.0014219350414350629, ...
[ "False", "Mask", "Model_masking", "None", "ValueError", "and", "argsort", "batch_size", "be", "between", "bool", "channel_consistent_masking", "def", "device", "dim", "else", "f", "gather", "has", "ids_restore", "ids_shuffle", "if", "index", "inputs", "inputs_mask", ...
patchtsmixer/modeling_patchtsmixer.py:forecast_masking
[ 0.00015528762014582753, 0.012987691909074783, 0.007340869400650263, -0.013947651721537113, 0.0007552625029347837, 0.03342919051647186, -0.00703029427677393, -0.042238231748342514, 0.01264888234436512, -0.007623210549354553, 0.01208420004695654, -0.003077518194913864, -0.0023998995311558247, ...
[ "Model_mask_ratios", "Model_masking", "None", "ValueError", "_", "and", "append", "batch1", "batch2", "batch_size", "be", "bool", "def", "device", "elif", "f", "for", "greater", "if", "in", "inputs", "inputs_mask", "int", "is", "isinstance", "key", "lambda", "le...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerPatchify
[ -0.0002431098255328834, 0.011470524594187737, -0.005280984099954367, 0.011640879325568676, -0.0006246325210668147, 0.040430761873722076, 0.007126489654183388, -0.027029553428292274, -0.0034070867113769054, 0.008120222948491573, 0.030890919268131256, -0.02930094487965107, 0.000068319182901177...
[ "Input", "Model", "ModelModelify", "Model_length", "Model_stride", "Module", "Sequence", "ValueError", "__init__", "be", "class", "config", "configuration", "context_length", "contiguous", "def", "dimension", "doesn", "f", "forward", "greater", "has", "if", "length", ...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerMasking
[ -0.00002322904583706986, 0.025245150551199913, 0.02176693081855774, 0.00048035912914201617, 0, 0.04286065697669983, -0.005077080335468054, -0.04039224237203598, 0.009368755854666233, -0.001157069462351501, 0.02221573330461979, 0.004375826101750135, 0.000575028476305306, -0.0210937261581420...
[ "ModelMasking", "Model_input", "Module", "None", "__init__", "bool", "channel_consistent_masking", "class", "config", "def", "elif", "else", "forecast", "forecast_masking", "forward", "if", "inputs", "is", "mask", "mask_ratio", "mask_type", "mask_value", "masked_input", ...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerStdScaler
[ -0.0002436667709844187, 0.061923377215862274, -0.014057972468435764, 0.022993605583906174, -0.0008928519673645496, 0.037108492106199265, 0.008366485126316547, -0.0015224727103486657, 0.008594145067036152, 0.02766062505543232, 0.028457432985305786, 0.02811594493687153, 0.0031445464119315147, ...
[ "ModelStdScaler", "Module", "True", "__init__", "clamp_min", "class", "config", "data", "def", "denominator", "dim", "else", "forward", "hasattr", "if", "keepdim", "loc", "minimum_scale", "nn", "observed_indicator", "return", "scale", "scaling_dim", "self", "sqrt", ...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerMeanScaler
[ -0.0002414414193481207, 0.05749146267771721, -0.0013563326792791486, 0.029995545744895935, -0.001086486387066543, 0.03158621862530708, 0.002783677540719509, 0.005822998937219381, 0.002556438557803631, 0.030222784727811813, 0.01590672880411148, 0.007072813343256712, 0.002954106777906418, 0....
[ "ModelMeanScaler", "Module", "None", "True", "__init__", "abs", "batch_observations", "batch_sum", "clamp", "class", "config", "data", "def", "default_scale", "dim", "else", "forward", "hasattr", "if", "is", "keepdim", "min", "minimum_scale", "nn", "not", "num_obser...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerNOPScaler
[ -0.00023304640490096062, 0.0414431206882, -0.010189997963607311, 0.04121541231870651, -0.0010531562147662044, 0.033473290503025055, 0.017192063853144646, -0.008823741227388382, 0.010588490404188633, 0.03324558213353157, 0.003586424048990011, -0.0070874569937586784, 0.0005479258834384382, 0...
[ "False", "ModelNOPScaler", "Module", "None", "True", "__init__", "class", "config", "data", "def", "dim", "else", "forward", "hasattr", "if", "keepdim", "loc", "mean", "nn", "observed_indicator", "ones_like", "requires_grad", "return", "scale", "scaling_dim", "self"...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerEncoderOutput
[ -0.00010860479960683733, 0.01770302653312683, 0.0374487079679966, 0.025646692141890526, -0.0008333756122738123, 0.04925072565674782, 0.048115916550159454, -0.04266883060336113, 0.018156949430704117, -0.013560972176492214, 0.008511070162057877, 0.028824158012866974, -0.0012695679906755686, ...
[ "ModelEncoderOutput", "ModelOutput", "None", "class", "hidden_states", "last_hidden_state", "r" ]
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerEncoder
[ -0.0002455018402542919, 0.015598260797560215, 0.0055789402686059475, 0.029374826699495316, -0.0008574774255976081, 0.049413468688726425, 0.019469361752271652, -0.028463980183005333, 0.00022415384592022747, 0.007742202840745449, 0.013776566833257675, 0.00177188275847584, 0.0008788253762759268...
[ "False", "Linear", "ModelBlock", "ModelEncoder", "ModelEncoderOutput", "ModelPositionalEncoding", "ModelPreTrainedModel", "Model_length", "Modeler", "Modeles", "None", "__init__", "auto_docstring", "class", "config", "d_model", "def", "else", "for", "forward", "hidden_states"...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerModelOutput
[ -0.00021177827147766948, 0.02157074399292469, 0.0021340041421353817, 0.015457110479474068, -0.001427475712262094, 0.06413546949625015, 0.05121609568595886, -0.013150079175829887, 0.013034727424383163, -0.0021772608160972595, 0.014591973274946213, 0.015572462230920792, -0.0016221314435824752,...
[ "ModelModelOutput", "ModelOutput", "Model_input", "None", "class", "hidden_states", "last_hidden_state", "loc", "mask", "r", "scale" ]
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerModel
[ -0.00023016115301288664, 0.03083088807761669, -0.012275260873138905, 0.026034971699118614, -0.0010847905650734901, 0.04910104349255562, 0.024664711207151413, -0.022038375958800316, 0.006765667349100113, 0.007193874102085829, 0.01804177835583687, 0.005709423683583736, 0.0020411189179867506, ...
[ "False", "ModelEncoder", "ModelEncoderOutput", "ModelMasking", "ModelMeanScaler", "ModelModel", "ModelModelOutput", "ModelModelify", "ModelNOPScaler", "ModelPreTrainedModel", "ModelStdScaler", "Model_input", "Modeled_x", "Modeling", "None", "True", "__init__", "auto_docstring", "...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerForPreTrainingOutput
[ -0.00015036678814794868, 0.02329893782734871, 0.04412166029214859, 0.0278011467307806, -0.0007702999864704907, 0.046147651970386505, 0.04704809561371803, -0.008554198779165745, 0.015532624907791615, -0.04051988944411278, 0.004192683380097151, 0.024199379608035088, -0.0002831468009389937, 0...
[ "ModelForPreTrainingOutput", "ModelOutput", "None", "class", "hidden_states", "last_hidden_state", "loss", "prediction_outputs", "r" ]
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerForPretraining
[ -0.00031170580768957734, 0.028156831860542297, 0.020747138187289238, 0.018695224076509476, -0.0011827008565887809, 0.05038590729236603, 0.02599092200398445, -0.017441276460886, 0.0020091666374355555, -0.0074096922762691975, 0.012482482008635998, 0.015845341607928276, -0.00021908465714659542,...
[ "False", "MSELoss", "ModelForPreTrainingOutput", "ModelForPretraining", "ModelModel", "ModelModelOutput", "ModelPreTrainedModel", "ModelPretrainHead", "Model_input", "None", "True", "__init__", "and", "auto_docstring", "class", "config", "def", "dim", "else", "for", "forward"...
patchtsmixer/modeling_patchtsmixer.py:PatchTSMixerForPredictionOutput
[ -0.00035307841608300805, 0.005765734240412712, 0.0016962324734777212, 0.02562548592686653, -0.001659832545556128, 0.0489213801920414, 0.0712854415178299, 0.0029847866389900446, 0.011240269988775253, -0.011298509314656258, 0.006406371481716633, 0.016889525577425957, -0.0014341536443680525, ...
[ "ModelForPredictionOutput", "ModelOutput", "None", "class", "hidden_states", "last_hidden_state", "loc", "loss", "prediction_outputs", "r", "scale" ]
patchtsmixer/modeling_patchtsmixer.py:SamplePatchTSMixerPredictionOutput
[ -0.00012928288197144866, 0.007820729166269302, -0.006120570469647646, 0.03332310542464256, -0.0004232686187606305, 0.0346832312643528, 0.06211245432496071, -0.02595575340092182, 0.014621363021433353, -0.006687290035188198, 0.028676006942987442, 0.0035419967025518417, -0.0028619333170354366, ...
[ "ModelModelPredictionOutput", "ModelOutput", "None", "class", "r", "sequences" ]
patchtsmixer/modeling_patchtsmixer.py:SamplePatchTSMixerRegressionOutput
[ -0.0001715865801088512, 0.015099619515240192, 0.0029741674661636353, 0.03843539580702782, -0.0006434496608562768, 0.05856822058558464, 0.04415494576096535, -0.02104795351624489, 0.013440948911011219, 0.001987544586881995, 0.03637635335326195, 0.009608848951756954, -0.001394140999764204, -0...
[ "ModelModelRegressionOutput", "ModelOutput", "None", "class", "r", "sequences" ]
patchtsmixer/modeling_patchtsmixer.py:nll
[ 0.00003988773823948577, 0.028024237602949142, 0.02416665107011795, 0.009019947610795498, -0.00017018768994603306, 0.06262906640768051, 0.04243346303701401, 0.025414694100618362, 0.01667839288711548, -0.020082145929336548, 0.025301234796643257, -0.028931906446814537, 0.001205496140755713, 0...
[ "Model", "def", "input", "log_prob", "return", "target" ]